64fb2d3d2f
This reverts commit 8e242cd012
.
Revert because lib/pq has known issues with context cancellation.
These issues need to be resolved before these changes can be merged.
Change-Id: I160af51dbc2d67c5449aafa406a403e5367bb555
20018 lines
696 KiB
Go
20018 lines
696 KiB
Go
//lint:file-ignore * generated file
|
|
// AUTOGENERATED BY storj.io/dbx
|
|
// DO NOT EDIT.
|
|
|
|
package satellitedb
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"database/sql"
|
|
"errors"
|
|
"fmt"
|
|
"reflect"
|
|
"strconv"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
"unicode"
|
|
|
|
"github.com/lib/pq"
|
|
)
|
|
|
|
// Prevent conditional imports from causing build failures: the generator may
// or may not emit code using each of these packages, so reference them once
// to keep the compiler from rejecting an unused import.
var _ = strconv.Itoa
var _ = strings.LastIndex
var _ = fmt.Sprint
var _ sync.Mutex
|
|
|
|
var (
	// WrapErr is invoked on every *Error produced by this package before it
	// is returned; replace it to translate or wrap errors for callers.
	WrapErr = func(err *Error) error { return err }
	// Logger, when non-nil, receives printf-style diagnostics (logged
	// statements, rollback failures, and similar).
	Logger func(format string, args ...interface{})

	// Sentinel causes stored in Error.Err for the corresponding ErrorCodes.
	errTooManyRows       = errors.New("too many rows")
	errUnsupportedDriver = errors.New("unsupported driver")
	errEmptyUpdate       = errors.New("empty update")
)
|
|
|
|
func logError(format string, args ...interface{}) {
|
|
if Logger != nil {
|
|
Logger(format, args...)
|
|
}
|
|
}
|
|
|
|
// ErrorCode classifies the failures surfaced by this package so callers can
// react without string-matching error text.
type ErrorCode int

const (
	ErrorCode_Unknown ErrorCode = iota
	ErrorCode_UnsupportedDriver
	ErrorCode_NoRows
	ErrorCode_TxDone
	ErrorCode_TooManyRows
	ErrorCode_ConstraintViolation
	ErrorCode_EmptyUpdate
)
|
|
|
|
// Error is the concrete error type produced by this package; every failure
// is wrapped in one (then passed through WrapErr).
type Error struct {
	Err         error     // underlying cause
	Code        ErrorCode // classification of the failure
	Driver      string    // set for unsupported-driver errors
	Constraint  string    // set for constraint-violation errors
	QuerySuffix string    // set for too-many-rows errors
}

// Error implements the error interface by delegating to the wrapped cause.
func (e *Error) Error() string {
	return e.Err.Error()
}
|
|
|
|
func wrapErr(e *Error) error {
|
|
if WrapErr == nil {
|
|
return e
|
|
}
|
|
return WrapErr(e)
|
|
}
|
|
|
|
func makeErr(err error) error {
|
|
if err == nil {
|
|
return nil
|
|
}
|
|
e := &Error{Err: err}
|
|
switch err {
|
|
case sql.ErrNoRows:
|
|
e.Code = ErrorCode_NoRows
|
|
case sql.ErrTxDone:
|
|
e.Code = ErrorCode_TxDone
|
|
}
|
|
return wrapErr(e)
|
|
}
|
|
|
|
func unsupportedDriver(driver string) error {
|
|
return wrapErr(&Error{
|
|
Err: errUnsupportedDriver,
|
|
Code: ErrorCode_UnsupportedDriver,
|
|
Driver: driver,
|
|
})
|
|
}
|
|
|
|
func emptyUpdate() error {
|
|
return wrapErr(&Error{
|
|
Err: errEmptyUpdate,
|
|
Code: ErrorCode_EmptyUpdate,
|
|
})
|
|
}
|
|
|
|
func tooManyRows(query_suffix string) error {
|
|
return wrapErr(&Error{
|
|
Err: errTooManyRows,
|
|
Code: ErrorCode_TooManyRows,
|
|
QuerySuffix: query_suffix,
|
|
})
|
|
}
|
|
|
|
func constraintViolation(err error, constraint string) error {
|
|
return wrapErr(&Error{
|
|
Err: err,
|
|
Code: ErrorCode_ConstraintViolation,
|
|
Constraint: constraint,
|
|
})
|
|
}
|
|
|
|
// driver abstracts the execution methods shared by *sql.DB and *sql.Tx so
// the same implementation code can run either inside or outside a
// transaction.
type driver interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}
|
|
|
|
var (
	// Errors presumably used by scan/conversion helpers elsewhere in this
	// generated file (usage not visible in this chunk).
	notAPointer     = errors.New("destination not a pointer")
	lossyConversion = errors.New("lossy conversion")
)
|
|
|
|
// DB wraps *sql.DB together with the dialect-specific method set selected
// when the connection is opened.
type DB struct {
	*sql.DB
	dbMethods

	// Hooks holds overridable behavior, e.g. for tests.
	Hooks struct {
		// Now returns the current time; defaults to time.Now.
		Now func() time.Time
	}
}
|
|
|
|
// Open connects to the database identified by driver ("postgres" or
// "cockroach") and source, verifies the connection with a ping, and returns
// a DB bound to the matching dialect implementation.
func Open(driver, source string) (db *DB, err error) {
	var sql_db *sql.DB
	switch driver {
	case "postgres":
		sql_db, err = openpostgres(source)
	case "cockroach":
		sql_db, err = opencockroach(source)
	default:
		return nil, unsupportedDriver(driver)
	}
	if err != nil {
		return nil, makeErr(err)
	}
	// Close the handle if any later step fails; err here is the named
	// result, so any `return nil, <err>` below triggers the cleanup.
	defer func(sql_db *sql.DB) {
		if err != nil {
			sql_db.Close()
		}
	}(sql_db)

	if err := sql_db.Ping(); err != nil {
		return nil, makeErr(err)
	}

	db = &DB{
		DB: sql_db,
	}
	db.Hooks.Now = time.Now

	switch driver {
	case "postgres":
		db.dbMethods = newpostgres(db)
	case "cockroach":
		db.dbMethods = newcockroach(db)
	default:
		// Effectively unreachable: the first switch already rejected
		// unknown drivers. Kept for defensive symmetry (generated code).
		return nil, unsupportedDriver(driver)
	}

	return db, nil
}
|
|
|
|
func (obj *DB) Close() (err error) {
|
|
return obj.makeErr(obj.DB.Close())
|
|
}
|
|
|
|
// Open begins a new transaction. NOTE(review): ctx is currently unused —
// Begin (not BeginTx) is called deliberately; per the file header, context
// support was reverted because of lib/pq context-cancellation issues.
func (obj *DB) Open(ctx context.Context) (*Tx, error) {
	tx, err := obj.DB.Begin()
	if err != nil {
		return nil, obj.makeErr(err)
	}

	return &Tx{
		Tx:        tx,
		txMethods: obj.wrapTx(tx),
	}, nil
}
|
|
|
|
func (obj *DB) NewRx() *Rx {
|
|
return &Rx{db: obj}
|
|
}
|
|
|
|
func DeleteAll(ctx context.Context, db *DB) (int64, error) {
|
|
tx, err := db.Open(ctx)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
defer func() {
|
|
if err == nil {
|
|
err = db.makeErr(tx.Commit())
|
|
return
|
|
}
|
|
|
|
if err_rollback := tx.Rollback(); err_rollback != nil {
|
|
logError("delete-all: rollback failed: %v", db.makeErr(err_rollback))
|
|
}
|
|
}()
|
|
return tx.deleteAll(ctx)
|
|
}
|
|
|
|
// Tx wraps *sql.Tx together with the dialect-specific transaction methods.
type Tx struct {
	Tx *sql.Tx
	txMethods
}
|
|
|
|
// dialectTx provides Commit/Rollback with package error wrapping, shared by
// every dialect's transaction type.
type dialectTx struct {
	tx *sql.Tx
}
|
|
|
|
func (tx *dialectTx) Commit() (err error) {
|
|
return makeErr(tx.tx.Commit())
|
|
}
|
|
|
|
func (tx *dialectTx) Rollback() (err error) {
|
|
return makeErr(tx.tx.Rollback())
|
|
}
|
|
|
|
// postgresImpl carries the postgres dialect plus the driver (either *sql.DB
// or *sql.Tx) that statements execute against.
type postgresImpl struct {
	db      *DB
	dialect __sqlbundle_postgres
	driver  driver
}
|
|
|
|
func (obj *postgresImpl) Rebind(s string) string {
|
|
return obj.dialect.Rebind(s)
|
|
}
|
|
|
|
// logStmt logs a statement and its arguments via the package Logger.
func (obj *postgresImpl) logStmt(stmt string, args ...interface{}) {
	postgresLogStmt(stmt, args...)
}
|
|
|
|
func (obj *postgresImpl) makeErr(err error) error {
|
|
constraint, ok := obj.isConstraintError(err)
|
|
if ok {
|
|
return constraintViolation(err, constraint)
|
|
}
|
|
return makeErr(err)
|
|
}
|
|
|
|
// postgresDB binds a postgresImpl to the root database handle (as opposed
// to a transaction).
type postgresDB struct {
	db *DB
	*postgresImpl
}

// newpostgres constructs the postgres method set for db, executing
// statements directly against db.DB.
func newpostgres(db *DB) *postgresDB {
	return &postgresDB{
		db: db,
		postgresImpl: &postgresImpl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
func (obj *postgresDB) Schema() string {
|
|
return `CREATE TABLE accounting_rollups (
|
|
id bigserial NOT NULL,
|
|
node_id bytea NOT NULL,
|
|
start_time timestamp with time zone NOT NULL,
|
|
put_total bigint NOT NULL,
|
|
get_total bigint NOT NULL,
|
|
get_audit_total bigint NOT NULL,
|
|
get_repair_total bigint NOT NULL,
|
|
put_repair_total bigint NOT NULL,
|
|
at_rest_total double precision NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE accounting_timestamps (
|
|
name text NOT NULL,
|
|
value timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( name )
|
|
);
|
|
CREATE TABLE bucket_bandwidth_rollups (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
inline bigint NOT NULL,
|
|
allocated bigint NOT NULL,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
|
);
|
|
CREATE TABLE bucket_storage_tallies (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp NOT NULL,
|
|
inline bigint NOT NULL,
|
|
remote bigint NOT NULL,
|
|
remote_segments_count integer NOT NULL,
|
|
inline_segments_count integer NOT NULL,
|
|
object_count integer NOT NULL,
|
|
metadata_size bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
|
);
|
|
CREATE TABLE coinpayments_transactions (
|
|
id text NOT NULL,
|
|
user_id bytea NOT NULL,
|
|
address text NOT NULL,
|
|
amount bytea NOT NULL,
|
|
received bytea NOT NULL,
|
|
status integer NOT NULL,
|
|
key text NOT NULL,
|
|
timeout integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE coupons (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
user_id bytea NOT NULL,
|
|
amount bigint NOT NULL,
|
|
description text NOT NULL,
|
|
type integer NOT NULL,
|
|
status integer NOT NULL,
|
|
duration bigint NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE coupon_usages (
|
|
coupon_id bytea NOT NULL,
|
|
amount bigint NOT NULL,
|
|
status integer NOT NULL,
|
|
period timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( coupon_id, period )
|
|
);
|
|
CREATE TABLE graceful_exit_progress (
|
|
node_id bytea NOT NULL,
|
|
bytes_transferred bigint NOT NULL,
|
|
pieces_transferred bigint NOT NULL,
|
|
pieces_failed bigint NOT NULL,
|
|
updated_at timestamp NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE graceful_exit_transfer_queue (
|
|
node_id bytea NOT NULL,
|
|
path bytea NOT NULL,
|
|
piece_num integer NOT NULL,
|
|
root_piece_id bytea,
|
|
durability_ratio double precision NOT NULL,
|
|
queued_at timestamp NOT NULL,
|
|
requested_at timestamp,
|
|
last_failed_at timestamp,
|
|
last_failed_code integer,
|
|
failed_count integer,
|
|
finished_at timestamp,
|
|
order_limit_send_count integer NOT NULL,
|
|
PRIMARY KEY ( node_id, path, piece_num )
|
|
);
|
|
CREATE TABLE injuredsegments (
|
|
path bytea NOT NULL,
|
|
data bytea NOT NULL,
|
|
attempted timestamp,
|
|
PRIMARY KEY ( path )
|
|
);
|
|
CREATE TABLE irreparabledbs (
|
|
segmentpath bytea NOT NULL,
|
|
segmentdetail bytea NOT NULL,
|
|
pieces_lost_count bigint NOT NULL,
|
|
seg_damaged_unix_sec bigint NOT NULL,
|
|
repair_attempt_count bigint NOT NULL,
|
|
PRIMARY KEY ( segmentpath )
|
|
);
|
|
CREATE TABLE nodes (
|
|
id bytea NOT NULL,
|
|
address text NOT NULL,
|
|
last_net text NOT NULL,
|
|
protocol integer NOT NULL,
|
|
type integer NOT NULL,
|
|
email text NOT NULL,
|
|
wallet text NOT NULL,
|
|
free_bandwidth bigint NOT NULL,
|
|
free_disk bigint NOT NULL,
|
|
piece_count bigint NOT NULL,
|
|
major bigint NOT NULL,
|
|
minor bigint NOT NULL,
|
|
patch bigint NOT NULL,
|
|
hash text NOT NULL,
|
|
timestamp timestamp with time zone NOT NULL,
|
|
release boolean NOT NULL,
|
|
latency_90 bigint NOT NULL,
|
|
audit_success_count bigint NOT NULL,
|
|
total_audit_count bigint NOT NULL,
|
|
uptime_success_count bigint NOT NULL,
|
|
total_uptime_count bigint NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
last_contact_success timestamp with time zone NOT NULL,
|
|
last_contact_failure timestamp with time zone NOT NULL,
|
|
contained boolean NOT NULL,
|
|
disqualified timestamp with time zone,
|
|
audit_reputation_alpha double precision NOT NULL,
|
|
audit_reputation_beta double precision NOT NULL,
|
|
uptime_reputation_alpha double precision NOT NULL,
|
|
uptime_reputation_beta double precision NOT NULL,
|
|
exit_initiated_at timestamp,
|
|
exit_loop_completed_at timestamp,
|
|
exit_finished_at timestamp,
|
|
exit_success boolean NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE nodes_offline_times (
|
|
node_id bytea NOT NULL,
|
|
tracked_at timestamp with time zone NOT NULL,
|
|
seconds integer NOT NULL,
|
|
PRIMARY KEY ( node_id, tracked_at )
|
|
);
|
|
CREATE TABLE offers (
|
|
id serial NOT NULL,
|
|
name text NOT NULL,
|
|
description text NOT NULL,
|
|
award_credit_in_cents integer NOT NULL,
|
|
invitee_credit_in_cents integer NOT NULL,
|
|
award_credit_duration_days integer,
|
|
invitee_credit_duration_days integer,
|
|
redeemable_cap integer,
|
|
expires_at timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
status integer NOT NULL,
|
|
type integer NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE peer_identities (
|
|
node_id bytea NOT NULL,
|
|
leaf_serial_number bytea NOT NULL,
|
|
chain bytea NOT NULL,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE pending_audits (
|
|
node_id bytea NOT NULL,
|
|
piece_id bytea NOT NULL,
|
|
stripe_index bigint NOT NULL,
|
|
share_size bigint NOT NULL,
|
|
expected_share_hash bytea NOT NULL,
|
|
reverify_count bigint NOT NULL,
|
|
path bytea NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE projects (
|
|
id bytea NOT NULL,
|
|
name text NOT NULL,
|
|
description text NOT NULL,
|
|
usage_limit bigint NOT NULL,
|
|
partner_id bytea,
|
|
owner_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE registration_tokens (
|
|
secret bytea NOT NULL,
|
|
owner_id bytea,
|
|
project_limit integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( secret ),
|
|
UNIQUE ( owner_id )
|
|
);
|
|
CREATE TABLE reset_password_tokens (
|
|
secret bytea NOT NULL,
|
|
owner_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( secret ),
|
|
UNIQUE ( owner_id )
|
|
);
|
|
CREATE TABLE serial_numbers (
|
|
id serial NOT NULL,
|
|
serial_number bytea NOT NULL,
|
|
bucket_id bytea NOT NULL,
|
|
expires_at timestamp NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE storagenode_bandwidth_rollups (
|
|
storagenode_id bytea NOT NULL,
|
|
interval_start timestamp NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
allocated bigint DEFAULT 0,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
|
);
|
|
CREATE TABLE storagenode_storage_tallies (
|
|
id bigserial NOT NULL,
|
|
node_id bytea NOT NULL,
|
|
interval_end_time timestamp with time zone NOT NULL,
|
|
data_total double precision NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE stripe_customers (
|
|
user_id bytea NOT NULL,
|
|
customer_id text NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( user_id ),
|
|
UNIQUE ( customer_id )
|
|
);
|
|
CREATE TABLE stripecoinpayments_invoice_project_records (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
storage double precision NOT NULL,
|
|
egress bigint NOT NULL,
|
|
objects bigint NOT NULL,
|
|
period_start timestamp with time zone NOT NULL,
|
|
period_end timestamp with time zone NOT NULL,
|
|
state integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( project_id, period_start, period_end )
|
|
);
|
|
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
|
tx_id text NOT NULL,
|
|
rate bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( tx_id )
|
|
);
|
|
CREATE TABLE users (
|
|
id bytea NOT NULL,
|
|
email text NOT NULL,
|
|
normalized_email text NOT NULL,
|
|
full_name text NOT NULL,
|
|
short_name text,
|
|
password_hash bytea NOT NULL,
|
|
status integer NOT NULL,
|
|
partner_id bytea,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE value_attributions (
|
|
project_id bytea NOT NULL,
|
|
bucket_name bytea NOT NULL,
|
|
partner_id bytea NOT NULL,
|
|
last_updated timestamp NOT NULL,
|
|
PRIMARY KEY ( project_id, bucket_name )
|
|
);
|
|
CREATE TABLE api_keys (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
head bytea NOT NULL,
|
|
name text NOT NULL,
|
|
secret bytea NOT NULL,
|
|
partner_id bytea,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( head ),
|
|
UNIQUE ( name, project_id )
|
|
);
|
|
CREATE TABLE bucket_metainfos (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL REFERENCES projects( id ),
|
|
name bytea NOT NULL,
|
|
partner_id bytea,
|
|
path_cipher integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
default_segment_size integer NOT NULL,
|
|
default_encryption_cipher_suite integer NOT NULL,
|
|
default_encryption_block_size integer NOT NULL,
|
|
default_redundancy_algorithm integer NOT NULL,
|
|
default_redundancy_share_size integer NOT NULL,
|
|
default_redundancy_required_shares integer NOT NULL,
|
|
default_redundancy_repair_shares integer NOT NULL,
|
|
default_redundancy_optimal_shares integer NOT NULL,
|
|
default_redundancy_total_shares integer NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( name, project_id )
|
|
);
|
|
CREATE TABLE project_invoice_stamps (
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
invoice_id bytea NOT NULL,
|
|
start_date timestamp with time zone NOT NULL,
|
|
end_date timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( project_id, start_date, end_date ),
|
|
UNIQUE ( invoice_id )
|
|
);
|
|
CREATE TABLE project_members (
|
|
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( member_id, project_id )
|
|
);
|
|
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
|
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
|
state integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( tx_id )
|
|
);
|
|
CREATE TABLE used_serials (
|
|
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
|
storage_node_id bytea NOT NULL,
|
|
PRIMARY KEY ( serial_number_id, storage_node_id )
|
|
);
|
|
CREATE TABLE user_credits (
|
|
id serial NOT NULL,
|
|
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
|
offer_id integer NOT NULL REFERENCES offers( id ),
|
|
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
|
type text NOT NULL,
|
|
credits_earned_in_cents integer NOT NULL,
|
|
credits_used_in_cents integer NOT NULL,
|
|
expires_at timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( id, offer_id )
|
|
);
|
|
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
|
|
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
|
CREATE INDEX node_last_ip ON nodes ( last_net );
|
|
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
|
|
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
|
|
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
|
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
|
|
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );`
|
|
}
|
|
|
|
// wrapTx builds the transaction-scoped postgres method set, executing
// against tx instead of the root handle.
func (obj *postgresDB) wrapTx(tx *sql.Tx) txMethods {
	return &postgresTx{
		dialectTx: dialectTx{tx: tx},
		postgresImpl: &postgresImpl{
			db:     obj.db,
			driver: tx,
		},
	}
}

// postgresTx combines commit/rollback handling with the postgres dialect
// implementation for use inside a transaction.
type postgresTx struct {
	dialectTx
	*postgresImpl
}
|
|
|
|
func postgresLogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
// cockroachImpl carries the cockroach dialect plus the driver (either
// *sql.DB or *sql.Tx) that statements execute against.
type cockroachImpl struct {
	db      *DB
	dialect __sqlbundle_cockroach
	driver  driver
}
|
|
|
|
func (obj *cockroachImpl) Rebind(s string) string {
|
|
return obj.dialect.Rebind(s)
|
|
}
|
|
|
|
// logStmt logs a statement and its arguments via the package Logger.
func (obj *cockroachImpl) logStmt(stmt string, args ...interface{}) {
	cockroachLogStmt(stmt, args...)
}
|
|
|
|
func (obj *cockroachImpl) makeErr(err error) error {
|
|
constraint, ok := obj.isConstraintError(err)
|
|
if ok {
|
|
return constraintViolation(err, constraint)
|
|
}
|
|
return makeErr(err)
|
|
}
|
|
|
|
// cockroachDB binds a cockroachImpl to the root database handle (as opposed
// to a transaction).
type cockroachDB struct {
	db *DB
	*cockroachImpl
}

// newcockroach constructs the cockroach method set for db, executing
// statements directly against db.DB.
func newcockroach(db *DB) *cockroachDB {
	return &cockroachDB{
		db: db,
		cockroachImpl: &cockroachImpl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
func (obj *cockroachDB) Schema() string {
|
|
return `CREATE TABLE accounting_rollups (
|
|
id bigserial NOT NULL,
|
|
node_id bytea NOT NULL,
|
|
start_time timestamp with time zone NOT NULL,
|
|
put_total bigint NOT NULL,
|
|
get_total bigint NOT NULL,
|
|
get_audit_total bigint NOT NULL,
|
|
get_repair_total bigint NOT NULL,
|
|
put_repair_total bigint NOT NULL,
|
|
at_rest_total double precision NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE accounting_timestamps (
|
|
name text NOT NULL,
|
|
value timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( name )
|
|
);
|
|
CREATE TABLE bucket_bandwidth_rollups (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
inline bigint NOT NULL,
|
|
allocated bigint NOT NULL,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
|
);
|
|
CREATE TABLE bucket_storage_tallies (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp NOT NULL,
|
|
inline bigint NOT NULL,
|
|
remote bigint NOT NULL,
|
|
remote_segments_count integer NOT NULL,
|
|
inline_segments_count integer NOT NULL,
|
|
object_count integer NOT NULL,
|
|
metadata_size bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
|
);
|
|
CREATE TABLE coinpayments_transactions (
|
|
id text NOT NULL,
|
|
user_id bytea NOT NULL,
|
|
address text NOT NULL,
|
|
amount bytea NOT NULL,
|
|
received bytea NOT NULL,
|
|
status integer NOT NULL,
|
|
key text NOT NULL,
|
|
timeout integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE coupons (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
user_id bytea NOT NULL,
|
|
amount bigint NOT NULL,
|
|
description text NOT NULL,
|
|
type integer NOT NULL,
|
|
status integer NOT NULL,
|
|
duration bigint NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE coupon_usages (
|
|
coupon_id bytea NOT NULL,
|
|
amount bigint NOT NULL,
|
|
status integer NOT NULL,
|
|
period timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( coupon_id, period )
|
|
);
|
|
CREATE TABLE graceful_exit_progress (
|
|
node_id bytea NOT NULL,
|
|
bytes_transferred bigint NOT NULL,
|
|
pieces_transferred bigint NOT NULL,
|
|
pieces_failed bigint NOT NULL,
|
|
updated_at timestamp NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE graceful_exit_transfer_queue (
|
|
node_id bytea NOT NULL,
|
|
path bytea NOT NULL,
|
|
piece_num integer NOT NULL,
|
|
root_piece_id bytea,
|
|
durability_ratio double precision NOT NULL,
|
|
queued_at timestamp NOT NULL,
|
|
requested_at timestamp,
|
|
last_failed_at timestamp,
|
|
last_failed_code integer,
|
|
failed_count integer,
|
|
finished_at timestamp,
|
|
order_limit_send_count integer NOT NULL,
|
|
PRIMARY KEY ( node_id, path, piece_num )
|
|
);
|
|
CREATE TABLE injuredsegments (
|
|
path bytea NOT NULL,
|
|
data bytea NOT NULL,
|
|
attempted timestamp,
|
|
PRIMARY KEY ( path )
|
|
);
|
|
CREATE TABLE irreparabledbs (
|
|
segmentpath bytea NOT NULL,
|
|
segmentdetail bytea NOT NULL,
|
|
pieces_lost_count bigint NOT NULL,
|
|
seg_damaged_unix_sec bigint NOT NULL,
|
|
repair_attempt_count bigint NOT NULL,
|
|
PRIMARY KEY ( segmentpath )
|
|
);
|
|
CREATE TABLE nodes (
|
|
id bytea NOT NULL,
|
|
address text NOT NULL,
|
|
last_net text NOT NULL,
|
|
protocol integer NOT NULL,
|
|
type integer NOT NULL,
|
|
email text NOT NULL,
|
|
wallet text NOT NULL,
|
|
free_bandwidth bigint NOT NULL,
|
|
free_disk bigint NOT NULL,
|
|
piece_count bigint NOT NULL,
|
|
major bigint NOT NULL,
|
|
minor bigint NOT NULL,
|
|
patch bigint NOT NULL,
|
|
hash text NOT NULL,
|
|
timestamp timestamp with time zone NOT NULL,
|
|
release boolean NOT NULL,
|
|
latency_90 bigint NOT NULL,
|
|
audit_success_count bigint NOT NULL,
|
|
total_audit_count bigint NOT NULL,
|
|
uptime_success_count bigint NOT NULL,
|
|
total_uptime_count bigint NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
last_contact_success timestamp with time zone NOT NULL,
|
|
last_contact_failure timestamp with time zone NOT NULL,
|
|
contained boolean NOT NULL,
|
|
disqualified timestamp with time zone,
|
|
audit_reputation_alpha double precision NOT NULL,
|
|
audit_reputation_beta double precision NOT NULL,
|
|
uptime_reputation_alpha double precision NOT NULL,
|
|
uptime_reputation_beta double precision NOT NULL,
|
|
exit_initiated_at timestamp,
|
|
exit_loop_completed_at timestamp,
|
|
exit_finished_at timestamp,
|
|
exit_success boolean NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE nodes_offline_times (
|
|
node_id bytea NOT NULL,
|
|
tracked_at timestamp with time zone NOT NULL,
|
|
seconds integer NOT NULL,
|
|
PRIMARY KEY ( node_id, tracked_at )
|
|
);
|
|
CREATE TABLE offers (
|
|
id serial NOT NULL,
|
|
name text NOT NULL,
|
|
description text NOT NULL,
|
|
award_credit_in_cents integer NOT NULL,
|
|
invitee_credit_in_cents integer NOT NULL,
|
|
award_credit_duration_days integer,
|
|
invitee_credit_duration_days integer,
|
|
redeemable_cap integer,
|
|
expires_at timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
status integer NOT NULL,
|
|
type integer NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE peer_identities (
|
|
node_id bytea NOT NULL,
|
|
leaf_serial_number bytea NOT NULL,
|
|
chain bytea NOT NULL,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE pending_audits (
|
|
node_id bytea NOT NULL,
|
|
piece_id bytea NOT NULL,
|
|
stripe_index bigint NOT NULL,
|
|
share_size bigint NOT NULL,
|
|
expected_share_hash bytea NOT NULL,
|
|
reverify_count bigint NOT NULL,
|
|
path bytea NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE projects (
|
|
id bytea NOT NULL,
|
|
name text NOT NULL,
|
|
description text NOT NULL,
|
|
usage_limit bigint NOT NULL,
|
|
partner_id bytea,
|
|
owner_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE registration_tokens (
|
|
secret bytea NOT NULL,
|
|
owner_id bytea,
|
|
project_limit integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( secret ),
|
|
UNIQUE ( owner_id )
|
|
);
|
|
CREATE TABLE reset_password_tokens (
|
|
secret bytea NOT NULL,
|
|
owner_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( secret ),
|
|
UNIQUE ( owner_id )
|
|
);
|
|
CREATE TABLE serial_numbers (
|
|
id serial NOT NULL,
|
|
serial_number bytea NOT NULL,
|
|
bucket_id bytea NOT NULL,
|
|
expires_at timestamp NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE storagenode_bandwidth_rollups (
|
|
storagenode_id bytea NOT NULL,
|
|
interval_start timestamp NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
allocated bigint DEFAULT 0,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
|
);
|
|
CREATE TABLE storagenode_storage_tallies (
|
|
id bigserial NOT NULL,
|
|
node_id bytea NOT NULL,
|
|
interval_end_time timestamp with time zone NOT NULL,
|
|
data_total double precision NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE stripe_customers (
|
|
user_id bytea NOT NULL,
|
|
customer_id text NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( user_id ),
|
|
UNIQUE ( customer_id )
|
|
);
|
|
CREATE TABLE stripecoinpayments_invoice_project_records (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
storage double precision NOT NULL,
|
|
egress bigint NOT NULL,
|
|
objects bigint NOT NULL,
|
|
period_start timestamp with time zone NOT NULL,
|
|
period_end timestamp with time zone NOT NULL,
|
|
state integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( project_id, period_start, period_end )
|
|
);
|
|
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
|
tx_id text NOT NULL,
|
|
rate bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( tx_id )
|
|
);
|
|
CREATE TABLE users (
|
|
id bytea NOT NULL,
|
|
email text NOT NULL,
|
|
normalized_email text NOT NULL,
|
|
full_name text NOT NULL,
|
|
short_name text,
|
|
password_hash bytea NOT NULL,
|
|
status integer NOT NULL,
|
|
partner_id bytea,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE value_attributions (
|
|
project_id bytea NOT NULL,
|
|
bucket_name bytea NOT NULL,
|
|
partner_id bytea NOT NULL,
|
|
last_updated timestamp NOT NULL,
|
|
PRIMARY KEY ( project_id, bucket_name )
|
|
);
|
|
CREATE TABLE api_keys (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
head bytea NOT NULL,
|
|
name text NOT NULL,
|
|
secret bytea NOT NULL,
|
|
partner_id bytea,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( head ),
|
|
UNIQUE ( name, project_id )
|
|
);
|
|
CREATE TABLE bucket_metainfos (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL REFERENCES projects( id ),
|
|
name bytea NOT NULL,
|
|
partner_id bytea,
|
|
path_cipher integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
default_segment_size integer NOT NULL,
|
|
default_encryption_cipher_suite integer NOT NULL,
|
|
default_encryption_block_size integer NOT NULL,
|
|
default_redundancy_algorithm integer NOT NULL,
|
|
default_redundancy_share_size integer NOT NULL,
|
|
default_redundancy_required_shares integer NOT NULL,
|
|
default_redundancy_repair_shares integer NOT NULL,
|
|
default_redundancy_optimal_shares integer NOT NULL,
|
|
default_redundancy_total_shares integer NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( name, project_id )
|
|
);
|
|
CREATE TABLE project_invoice_stamps (
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
invoice_id bytea NOT NULL,
|
|
start_date timestamp with time zone NOT NULL,
|
|
end_date timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( project_id, start_date, end_date ),
|
|
UNIQUE ( invoice_id )
|
|
);
|
|
CREATE TABLE project_members (
|
|
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( member_id, project_id )
|
|
);
|
|
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
|
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
|
state integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( tx_id )
|
|
);
|
|
CREATE TABLE used_serials (
|
|
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
|
storage_node_id bytea NOT NULL,
|
|
PRIMARY KEY ( serial_number_id, storage_node_id )
|
|
);
|
|
CREATE TABLE user_credits (
|
|
id serial NOT NULL,
|
|
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
|
offer_id integer NOT NULL REFERENCES offers( id ),
|
|
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
|
type text NOT NULL,
|
|
credits_earned_in_cents integer NOT NULL,
|
|
credits_used_in_cents integer NOT NULL,
|
|
expires_at timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( id, offer_id )
|
|
);
|
|
CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
|
|
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
|
CREATE INDEX node_last_ip ON nodes ( last_net );
|
|
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
|
|
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
|
|
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
|
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
|
|
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );`
|
|
}
|
|
|
|
func (obj *cockroachDB) wrapTx(tx *sql.Tx) txMethods {
|
|
return &cockroachTx{
|
|
dialectTx: dialectTx{tx: tx},
|
|
cockroachImpl: &cockroachImpl{
|
|
db: obj.db,
|
|
driver: tx,
|
|
},
|
|
}
|
|
}
|
|
|
|
// cockroachTx is the transaction handle returned by cockroachDB.wrapTx.
// It embeds dialectTx for commit/rollback and *cockroachImpl for the
// generated query methods, both operating on the same underlying *sql.Tx.
type cockroachTx struct {
	dialectTx
	*cockroachImpl
}
|
|
|
|
func cockroachLogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
// pretty is an argument list that formats readably for logging; see Format.
type pretty []interface{}
|
|
|
|
// Format implements fmt.Formatter so that a query argument list renders in a
// readable, bracketed form: strings quoted, times as RFC3339Nano, byte slices
// quoted when fully printable (hex otherwise), and nil pointers as NULL.
func (p pretty) Format(f fmt.State, c rune) {
	fmt.Fprint(f, "[")
nextval:
	for i, val := range p {
		if i > 0 {
			fmt.Fprint(f, ", ")
		}
		// Dereference non-nil pointers so the element formats as its value;
		// a nil pointer renders as the SQL-style NULL marker.
		rv := reflect.ValueOf(val)
		if rv.Kind() == reflect.Ptr {
			if rv.IsNil() {
				fmt.Fprint(f, "NULL")
				continue
			}
			val = rv.Elem().Interface()
		}
		switch v := val.(type) {
		case string:
			fmt.Fprintf(f, "%q", v)
		case time.Time:
			fmt.Fprintf(f, "%s", v.Format(time.RFC3339Nano))
		case []byte:
			// If any byte is non-printable, fall back to hex for the whole
			// slice and move on to the next argument (labeled continue).
			for _, b := range v {
				if !unicode.IsPrint(rune(b)) {
					fmt.Fprintf(f, "%#x", v)
					continue nextval
				}
			}
			fmt.Fprintf(f, "%q", v)
		default:
			fmt.Fprintf(f, "%v", v)
		}
	}
	fmt.Fprint(f, "]")
}
|
|
|
|
// AccountingRollup mirrors a row of the "accounting_rollups" table.
type AccountingRollup struct {
	Id             int64
	NodeId         []byte
	StartTime      time.Time
	PutTotal       int64
	GetTotal       int64
	GetAuditTotal  int64
	GetRepairTotal int64
	PutRepairTotal int64
	AtRestTotal    float64
}

func (AccountingRollup) _Table() string { return "accounting_rollups" }

// AccountingRollup_Update_Fields is empty: no column of this table is updatable.
type AccountingRollup_Update_Fields struct{}

// Each *_Field type below is an optional-value wrapper for one column: the
// constructor marks it set, value() yields the SQL argument (nil when the
// field is unset or null), and _Column names the column.

type AccountingRollup_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func AccountingRollup_Id(val int64) AccountingRollup_Id_Field {
	return AccountingRollup_Id_Field{_set: true, _value: val}
}

func (f AccountingRollup_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_Id_Field) _Column() string { return "id" }

type AccountingRollup_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func AccountingRollup_NodeId(val []byte) AccountingRollup_NodeId_Field {
	return AccountingRollup_NodeId_Field{_set: true, _value: val}
}

func (f AccountingRollup_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_NodeId_Field) _Column() string { return "node_id" }

type AccountingRollup_StartTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func AccountingRollup_StartTime(val time.Time) AccountingRollup_StartTime_Field {
	return AccountingRollup_StartTime_Field{_set: true, _value: val}
}

func (f AccountingRollup_StartTime_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_StartTime_Field) _Column() string { return "start_time" }

type AccountingRollup_PutTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func AccountingRollup_PutTotal(val int64) AccountingRollup_PutTotal_Field {
	return AccountingRollup_PutTotal_Field{_set: true, _value: val}
}

func (f AccountingRollup_PutTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_PutTotal_Field) _Column() string { return "put_total" }

type AccountingRollup_GetTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func AccountingRollup_GetTotal(val int64) AccountingRollup_GetTotal_Field {
	return AccountingRollup_GetTotal_Field{_set: true, _value: val}
}

func (f AccountingRollup_GetTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetTotal_Field) _Column() string { return "get_total" }

type AccountingRollup_GetAuditTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func AccountingRollup_GetAuditTotal(val int64) AccountingRollup_GetAuditTotal_Field {
	return AccountingRollup_GetAuditTotal_Field{_set: true, _value: val}
}

func (f AccountingRollup_GetAuditTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetAuditTotal_Field) _Column() string { return "get_audit_total" }

type AccountingRollup_GetRepairTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func AccountingRollup_GetRepairTotal(val int64) AccountingRollup_GetRepairTotal_Field {
	return AccountingRollup_GetRepairTotal_Field{_set: true, _value: val}
}

func (f AccountingRollup_GetRepairTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetRepairTotal_Field) _Column() string { return "get_repair_total" }

type AccountingRollup_PutRepairTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func AccountingRollup_PutRepairTotal(val int64) AccountingRollup_PutRepairTotal_Field {
	return AccountingRollup_PutRepairTotal_Field{_set: true, _value: val}
}

func (f AccountingRollup_PutRepairTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_PutRepairTotal_Field) _Column() string { return "put_repair_total" }

type AccountingRollup_AtRestTotal_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func AccountingRollup_AtRestTotal(val float64) AccountingRollup_AtRestTotal_Field {
	return AccountingRollup_AtRestTotal_Field{_set: true, _value: val}
}

func (f AccountingRollup_AtRestTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_AtRestTotal_Field) _Column() string { return "at_rest_total" }
|
|
|
|
// AccountingTimestamps mirrors a row of the "accounting_timestamps" table.
type AccountingTimestamps struct {
	Name  string
	Value time.Time
}

func (AccountingTimestamps) _Table() string { return "accounting_timestamps" }

// AccountingTimestamps_Update_Fields lists the columns that may be updated.
type AccountingTimestamps_Update_Fields struct {
	Value AccountingTimestamps_Value_Field
}

// Optional-value wrappers: constructor marks the field set, value() yields
// the SQL argument (nil when unset or null), _Column names the column.

type AccountingTimestamps_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

func AccountingTimestamps_Name(val string) AccountingTimestamps_Name_Field {
	return AccountingTimestamps_Name_Field{_set: true, _value: val}
}

func (f AccountingTimestamps_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingTimestamps_Name_Field) _Column() string { return "name" }

type AccountingTimestamps_Value_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func AccountingTimestamps_Value(val time.Time) AccountingTimestamps_Value_Field {
	return AccountingTimestamps_Value_Field{_set: true, _value: val}
}

func (f AccountingTimestamps_Value_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingTimestamps_Value_Field) _Column() string { return "value" }
|
|
|
|
type BucketBandwidthRollup struct {
|
|
BucketName []byte
|
|
ProjectId []byte
|
|
IntervalStart time.Time
|
|
IntervalSeconds uint
|
|
Action uint
|
|
Inline uint64
|
|
Allocated uint64
|
|
Settled uint64
|
|
}
|
|
|
|
func (BucketBandwidthRollup) _Table() string { return "bucket_bandwidth_rollups" }
|
|
|
|
type BucketBandwidthRollup_Update_Fields struct {
|
|
Inline BucketBandwidthRollup_Inline_Field
|
|
Allocated BucketBandwidthRollup_Allocated_Field
|
|
Settled BucketBandwidthRollup_Settled_Field
|
|
}
|
|
|
|
type BucketBandwidthRollup_BucketName_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketBandwidthRollup_BucketName(v []byte) BucketBandwidthRollup_BucketName_Field {
|
|
return BucketBandwidthRollup_BucketName_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_BucketName_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_BucketName_Field) _Column() string { return "bucket_name" }
|
|
|
|
type BucketBandwidthRollup_ProjectId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketBandwidthRollup_ProjectId(v []byte) BucketBandwidthRollup_ProjectId_Field {
|
|
return BucketBandwidthRollup_ProjectId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_ProjectId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_ProjectId_Field) _Column() string { return "project_id" }
|
|
|
|
type BucketBandwidthRollup_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func BucketBandwidthRollup_IntervalStart(v time.Time) BucketBandwidthRollup_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return BucketBandwidthRollup_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
type BucketBandwidthRollup_IntervalSeconds_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketBandwidthRollup_IntervalSeconds(v uint) BucketBandwidthRollup_IntervalSeconds_Field {
|
|
return BucketBandwidthRollup_IntervalSeconds_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_IntervalSeconds_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }
|
|
|
|
type BucketBandwidthRollup_Action_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketBandwidthRollup_Action(v uint) BucketBandwidthRollup_Action_Field {
|
|
return BucketBandwidthRollup_Action_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_Action_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_Action_Field) _Column() string { return "action" }
|
|
|
|
type BucketBandwidthRollup_Inline_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketBandwidthRollup_Inline(v uint64) BucketBandwidthRollup_Inline_Field {
|
|
return BucketBandwidthRollup_Inline_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_Inline_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_Inline_Field) _Column() string { return "inline" }
|
|
|
|
type BucketBandwidthRollup_Allocated_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketBandwidthRollup_Allocated(v uint64) BucketBandwidthRollup_Allocated_Field {
|
|
return BucketBandwidthRollup_Allocated_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_Allocated_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_Allocated_Field) _Column() string { return "allocated" }
|
|
|
|
type BucketBandwidthRollup_Settled_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketBandwidthRollup_Settled(v uint64) BucketBandwidthRollup_Settled_Field {
|
|
return BucketBandwidthRollup_Settled_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_Settled_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_Settled_Field) _Column() string { return "settled" }
|
|
|
|
type BucketStorageTally struct {
|
|
BucketName []byte
|
|
ProjectId []byte
|
|
IntervalStart time.Time
|
|
Inline uint64
|
|
Remote uint64
|
|
RemoteSegmentsCount uint
|
|
InlineSegmentsCount uint
|
|
ObjectCount uint
|
|
MetadataSize uint64
|
|
}
|
|
|
|
func (BucketStorageTally) _Table() string { return "bucket_storage_tallies" }
|
|
|
|
type BucketStorageTally_Update_Fields struct {
|
|
}
|
|
|
|
type BucketStorageTally_BucketName_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketStorageTally_BucketName(v []byte) BucketStorageTally_BucketName_Field {
|
|
return BucketStorageTally_BucketName_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_BucketName_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_BucketName_Field) _Column() string { return "bucket_name" }
|
|
|
|
type BucketStorageTally_ProjectId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketStorageTally_ProjectId(v []byte) BucketStorageTally_ProjectId_Field {
|
|
return BucketStorageTally_ProjectId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_ProjectId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_ProjectId_Field) _Column() string { return "project_id" }
|
|
|
|
type BucketStorageTally_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func BucketStorageTally_IntervalStart(v time.Time) BucketStorageTally_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return BucketStorageTally_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
type BucketStorageTally_Inline_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketStorageTally_Inline(v uint64) BucketStorageTally_Inline_Field {
|
|
return BucketStorageTally_Inline_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_Inline_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_Inline_Field) _Column() string { return "inline" }
|
|
|
|
type BucketStorageTally_Remote_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketStorageTally_Remote(v uint64) BucketStorageTally_Remote_Field {
|
|
return BucketStorageTally_Remote_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_Remote_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_Remote_Field) _Column() string { return "remote" }
|
|
|
|
type BucketStorageTally_RemoteSegmentsCount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketStorageTally_RemoteSegmentsCount(v uint) BucketStorageTally_RemoteSegmentsCount_Field {
|
|
return BucketStorageTally_RemoteSegmentsCount_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_RemoteSegmentsCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_RemoteSegmentsCount_Field) _Column() string { return "remote_segments_count" }
|
|
|
|
type BucketStorageTally_InlineSegmentsCount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketStorageTally_InlineSegmentsCount(v uint) BucketStorageTally_InlineSegmentsCount_Field {
|
|
return BucketStorageTally_InlineSegmentsCount_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_InlineSegmentsCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_InlineSegmentsCount_Field) _Column() string { return "inline_segments_count" }
|
|
|
|
type BucketStorageTally_ObjectCount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketStorageTally_ObjectCount(v uint) BucketStorageTally_ObjectCount_Field {
|
|
return BucketStorageTally_ObjectCount_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_ObjectCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_ObjectCount_Field) _Column() string { return "object_count" }
|
|
|
|
type BucketStorageTally_MetadataSize_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketStorageTally_MetadataSize(v uint64) BucketStorageTally_MetadataSize_Field {
|
|
return BucketStorageTally_MetadataSize_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_MetadataSize_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_MetadataSize_Field) _Column() string { return "metadata_size" }
|
|
|
|
// CoinpaymentsTransaction mirrors a row of the "coinpayments_transactions" table.
type CoinpaymentsTransaction struct {
	Id        string
	UserId    []byte
	Address   string
	Amount    []byte
	Received  []byte
	Status    int
	Key       string
	Timeout   int
	CreatedAt time.Time
}

func (CoinpaymentsTransaction) _Table() string { return "coinpayments_transactions" }

// CoinpaymentsTransaction_Update_Fields lists the columns that may be updated.
type CoinpaymentsTransaction_Update_Fields struct {
	Received CoinpaymentsTransaction_Received_Field
	Status   CoinpaymentsTransaction_Status_Field
}

// Optional-value wrappers: constructor marks the field set, value() yields
// the SQL argument (nil when unset or null), _Column names the column.

type CoinpaymentsTransaction_Id_Field struct {
	_set   bool
	_null  bool
	_value string
}

func CoinpaymentsTransaction_Id(val string) CoinpaymentsTransaction_Id_Field {
	return CoinpaymentsTransaction_Id_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Id_Field) _Column() string { return "id" }

type CoinpaymentsTransaction_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func CoinpaymentsTransaction_UserId(val []byte) CoinpaymentsTransaction_UserId_Field {
	return CoinpaymentsTransaction_UserId_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_UserId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_UserId_Field) _Column() string { return "user_id" }

type CoinpaymentsTransaction_Address_Field struct {
	_set   bool
	_null  bool
	_value string
}

func CoinpaymentsTransaction_Address(val string) CoinpaymentsTransaction_Address_Field {
	return CoinpaymentsTransaction_Address_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Address_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Address_Field) _Column() string { return "address" }

type CoinpaymentsTransaction_Amount_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func CoinpaymentsTransaction_Amount(val []byte) CoinpaymentsTransaction_Amount_Field {
	return CoinpaymentsTransaction_Amount_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Amount_Field) _Column() string { return "amount" }

type CoinpaymentsTransaction_Received_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func CoinpaymentsTransaction_Received(val []byte) CoinpaymentsTransaction_Received_Field {
	return CoinpaymentsTransaction_Received_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Received_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Received_Field) _Column() string { return "received" }

type CoinpaymentsTransaction_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

func CoinpaymentsTransaction_Status(val int) CoinpaymentsTransaction_Status_Field {
	return CoinpaymentsTransaction_Status_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Status_Field) _Column() string { return "status" }

type CoinpaymentsTransaction_Key_Field struct {
	_set   bool
	_null  bool
	_value string
}

func CoinpaymentsTransaction_Key(val string) CoinpaymentsTransaction_Key_Field {
	return CoinpaymentsTransaction_Key_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Key_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Key_Field) _Column() string { return "key" }

type CoinpaymentsTransaction_Timeout_Field struct {
	_set   bool
	_null  bool
	_value int
}

func CoinpaymentsTransaction_Timeout(val int) CoinpaymentsTransaction_Timeout_Field {
	return CoinpaymentsTransaction_Timeout_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_Timeout_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Timeout_Field) _Column() string { return "timeout" }

type CoinpaymentsTransaction_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func CoinpaymentsTransaction_CreatedAt(val time.Time) CoinpaymentsTransaction_CreatedAt_Field {
	return CoinpaymentsTransaction_CreatedAt_Field{_set: true, _value: val}
}

func (f CoinpaymentsTransaction_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// Coupon mirrors a row of the "coupons" table.
type Coupon struct {
	Id          []byte
	ProjectId   []byte
	UserId      []byte
	Amount      int64
	Description string
	Type        int
	Status      int
	Duration    int64
	CreatedAt   time.Time
}

func (Coupon) _Table() string { return "coupons" }

// Coupon_Update_Fields lists the columns that may be updated.
type Coupon_Update_Fields struct {
	Status Coupon_Status_Field
}

// Optional-value wrappers: constructor marks the field set, value() yields
// the SQL argument (nil when unset or null), _Column names the column.

type Coupon_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Coupon_Id(val []byte) Coupon_Id_Field {
	return Coupon_Id_Field{_set: true, _value: val}
}

func (f Coupon_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Id_Field) _Column() string { return "id" }

type Coupon_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Coupon_ProjectId(val []byte) Coupon_ProjectId_Field {
	return Coupon_ProjectId_Field{_set: true, _value: val}
}

func (f Coupon_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_ProjectId_Field) _Column() string { return "project_id" }

type Coupon_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Coupon_UserId(val []byte) Coupon_UserId_Field {
	return Coupon_UserId_Field{_set: true, _value: val}
}

func (f Coupon_UserId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_UserId_Field) _Column() string { return "user_id" }

type Coupon_Amount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Coupon_Amount(val int64) Coupon_Amount_Field {
	return Coupon_Amount_Field{_set: true, _value: val}
}

func (f Coupon_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Amount_Field) _Column() string { return "amount" }

type Coupon_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Coupon_Description(val string) Coupon_Description_Field {
	return Coupon_Description_Field{_set: true, _value: val}
}

func (f Coupon_Description_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Description_Field) _Column() string { return "description" }

type Coupon_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Coupon_Type(val int) Coupon_Type_Field {
	return Coupon_Type_Field{_set: true, _value: val}
}

func (f Coupon_Type_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Type_Field) _Column() string { return "type" }

type Coupon_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Coupon_Status(val int) Coupon_Status_Field {
	return Coupon_Status_Field{_set: true, _value: val}
}

func (f Coupon_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Status_Field) _Column() string { return "status" }

type Coupon_Duration_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Coupon_Duration(val int64) Coupon_Duration_Field {
	return Coupon_Duration_Field{_set: true, _value: val}
}

func (f Coupon_Duration_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Duration_Field) _Column() string { return "duration" }

type Coupon_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Coupon_CreatedAt(val time.Time) Coupon_CreatedAt_Field {
	return Coupon_CreatedAt_Field{_set: true, _value: val}
}

func (f Coupon_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// CouponUsage mirrors a row of the "coupon_usages" table.
type CouponUsage struct {
	CouponId []byte
	Amount   int64
	Status   int
	Period   time.Time
}

func (CouponUsage) _Table() string { return "coupon_usages" }

// CouponUsage_Update_Fields lists the columns that may be updated.
type CouponUsage_Update_Fields struct {
	Status CouponUsage_Status_Field
}

// Optional-value wrappers: constructor marks the field set, value() yields
// the SQL argument (nil when unset or null), _Column names the column.

type CouponUsage_CouponId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func CouponUsage_CouponId(val []byte) CouponUsage_CouponId_Field {
	return CouponUsage_CouponId_Field{_set: true, _value: val}
}

func (f CouponUsage_CouponId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponUsage_CouponId_Field) _Column() string { return "coupon_id" }

type CouponUsage_Amount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func CouponUsage_Amount(val int64) CouponUsage_Amount_Field {
	return CouponUsage_Amount_Field{_set: true, _value: val}
}

func (f CouponUsage_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponUsage_Amount_Field) _Column() string { return "amount" }

type CouponUsage_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

func CouponUsage_Status(val int) CouponUsage_Status_Field {
	return CouponUsage_Status_Field{_set: true, _value: val}
}

func (f CouponUsage_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponUsage_Status_Field) _Column() string { return "status" }

type CouponUsage_Period_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func CouponUsage_Period(val time.Time) CouponUsage_Period_Field {
	return CouponUsage_Period_Field{_set: true, _value: val}
}

func (f CouponUsage_Period_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponUsage_Period_Field) _Column() string { return "period" }
|
|
|
|
type GracefulExitProgress struct {
|
|
NodeId []byte
|
|
BytesTransferred int64
|
|
PiecesTransferred int64
|
|
PiecesFailed int64
|
|
UpdatedAt time.Time
|
|
}
|
|
|
|
func (GracefulExitProgress) _Table() string { return "graceful_exit_progress" }
|
|
|
|
type GracefulExitProgress_Update_Fields struct {
|
|
BytesTransferred GracefulExitProgress_BytesTransferred_Field
|
|
PiecesTransferred GracefulExitProgress_PiecesTransferred_Field
|
|
PiecesFailed GracefulExitProgress_PiecesFailed_Field
|
|
}
|
|
|
|
type GracefulExitProgress_NodeId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func GracefulExitProgress_NodeId(v []byte) GracefulExitProgress_NodeId_Field {
|
|
return GracefulExitProgress_NodeId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitProgress_NodeId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitProgress_NodeId_Field) _Column() string { return "node_id" }
|
|
|
|
type GracefulExitProgress_BytesTransferred_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func GracefulExitProgress_BytesTransferred(v int64) GracefulExitProgress_BytesTransferred_Field {
|
|
return GracefulExitProgress_BytesTransferred_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitProgress_BytesTransferred_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitProgress_BytesTransferred_Field) _Column() string { return "bytes_transferred" }
|
|
|
|
type GracefulExitProgress_PiecesTransferred_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func GracefulExitProgress_PiecesTransferred(v int64) GracefulExitProgress_PiecesTransferred_Field {
|
|
return GracefulExitProgress_PiecesTransferred_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitProgress_PiecesTransferred_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitProgress_PiecesTransferred_Field) _Column() string { return "pieces_transferred" }
|
|
|
|
type GracefulExitProgress_PiecesFailed_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func GracefulExitProgress_PiecesFailed(v int64) GracefulExitProgress_PiecesFailed_Field {
|
|
return GracefulExitProgress_PiecesFailed_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitProgress_PiecesFailed_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitProgress_PiecesFailed_Field) _Column() string { return "pieces_failed" }
|
|
|
|
type GracefulExitProgress_UpdatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func GracefulExitProgress_UpdatedAt(v time.Time) GracefulExitProgress_UpdatedAt_Field {
|
|
v = toUTC(v)
|
|
return GracefulExitProgress_UpdatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitProgress_UpdatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitProgress_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// GracefulExitTransferQueue mirrors one row of the
// graceful_exit_transfer_queue table. Pointer-typed fields correspond to
// nullable columns.
type GracefulExitTransferQueue struct {
	NodeId              []byte
	Path                []byte
	PieceNum            int
	RootPieceId         []byte // nullable (see the _Raw/_Null constructors below)
	DurabilityRatio     float64
	QueuedAt            time.Time
	RequestedAt         *time.Time // nullable
	LastFailedAt        *time.Time // nullable
	LastFailedCode      *int       // nullable
	FailedCount         *int       // nullable
	FinishedAt          *time.Time // nullable
	OrderLimitSendCount int
}

// _Table returns the SQL table name backing GracefulExitTransferQueue rows.
func (GracefulExitTransferQueue) _Table() string { return "graceful_exit_transfer_queue" }

// GracefulExitTransferQueue_Create_Fields bundles the optional (all nullable)
// columns that may be supplied when inserting a row; unset fields are omitted.
type GracefulExitTransferQueue_Create_Fields struct {
	RootPieceId    GracefulExitTransferQueue_RootPieceId_Field
	RequestedAt    GracefulExitTransferQueue_RequestedAt_Field
	LastFailedAt   GracefulExitTransferQueue_LastFailedAt_Field
	LastFailedCode GracefulExitTransferQueue_LastFailedCode_Field
	FailedCount    GracefulExitTransferQueue_FailedCount_Field
	FinishedAt     GracefulExitTransferQueue_FinishedAt_Field
}

// GracefulExitTransferQueue_Update_Fields bundles the columns that an update
// may change; unset fields leave the stored value untouched. Note the key
// columns (NodeId, Path, PieceNum) are not listed here.
type GracefulExitTransferQueue_Update_Fields struct {
	DurabilityRatio     GracefulExitTransferQueue_DurabilityRatio_Field
	RequestedAt         GracefulExitTransferQueue_RequestedAt_Field
	LastFailedAt        GracefulExitTransferQueue_LastFailedAt_Field
	LastFailedCode      GracefulExitTransferQueue_LastFailedCode_Field
	FailedCount         GracefulExitTransferQueue_FailedCount_Field
	FinishedAt          GracefulExitTransferQueue_FinishedAt_Field
	OrderLimitSendCount GracefulExitTransferQueue_OrderLimitSendCount_Field
}
|
|
|
|
// GracefulExitTransferQueue_NodeId_Field carries a value destined for the
// node_id column.
type GracefulExitTransferQueue_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// GracefulExitTransferQueue_NodeId wraps v as a set, non-null column value.
func GracefulExitTransferQueue_NodeId(v []byte) GracefulExitTransferQueue_NodeId_Field {
	return GracefulExitTransferQueue_NodeId_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_NodeId_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_NodeId_Field) _Column() string { return "node_id" }

// GracefulExitTransferQueue_Path_Field carries a value destined for the
// path column.
type GracefulExitTransferQueue_Path_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// GracefulExitTransferQueue_Path wraps v as a set, non-null column value.
func GracefulExitTransferQueue_Path(v []byte) GracefulExitTransferQueue_Path_Field {
	return GracefulExitTransferQueue_Path_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_Path_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_Path_Field) _Column() string { return "path" }

// GracefulExitTransferQueue_PieceNum_Field carries a value destined for the
// piece_num column.
type GracefulExitTransferQueue_PieceNum_Field struct {
	_set   bool
	_null  bool
	_value int
}

// GracefulExitTransferQueue_PieceNum wraps v as a set, non-null column value.
func GracefulExitTransferQueue_PieceNum(v int) GracefulExitTransferQueue_PieceNum_Field {
	return GracefulExitTransferQueue_PieceNum_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_PieceNum_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_PieceNum_Field) _Column() string { return "piece_num" }

// GracefulExitTransferQueue_RootPieceId_Field carries a value destined for
// the nullable root_piece_id column.
type GracefulExitTransferQueue_RootPieceId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// GracefulExitTransferQueue_RootPieceId wraps v as a set, non-null column value.
func GracefulExitTransferQueue_RootPieceId(v []byte) GracefulExitTransferQueue_RootPieceId_Field {
	return GracefulExitTransferQueue_RootPieceId_Field{_set: true, _value: v}
}

// GracefulExitTransferQueue_RootPieceId_Raw maps a possibly-nil slice onto
// the field: nil becomes SQL NULL, anything else the plain value.
func GracefulExitTransferQueue_RootPieceId_Raw(v []byte) GracefulExitTransferQueue_RootPieceId_Field {
	if v != nil {
		return GracefulExitTransferQueue_RootPieceId(v)
	}
	return GracefulExitTransferQueue_RootPieceId_Null()
}

// GracefulExitTransferQueue_RootPieceId_Null builds a field explicitly set to NULL.
func GracefulExitTransferQueue_RootPieceId_Null() GracefulExitTransferQueue_RootPieceId_Field {
	return GracefulExitTransferQueue_RootPieceId_Field{_set: true, _null: true}
}

// isnull reports whether the field would bind as NULL.
func (x GracefulExitTransferQueue_RootPieceId_Field) isnull() bool {
	return !x._set || x._null || x._value == nil
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_RootPieceId_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_RootPieceId_Field) _Column() string { return "root_piece_id" }

// GracefulExitTransferQueue_DurabilityRatio_Field carries a value destined
// for the durability_ratio column.
type GracefulExitTransferQueue_DurabilityRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// GracefulExitTransferQueue_DurabilityRatio wraps v as a set, non-null column value.
func GracefulExitTransferQueue_DurabilityRatio(v float64) GracefulExitTransferQueue_DurabilityRatio_Field {
	return GracefulExitTransferQueue_DurabilityRatio_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_DurabilityRatio_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_DurabilityRatio_Field) _Column() string { return "durability_ratio" }
|
|
|
|
type GracefulExitTransferQueue_QueuedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_QueuedAt(v time.Time) GracefulExitTransferQueue_QueuedAt_Field {
|
|
v = toUTC(v)
|
|
return GracefulExitTransferQueue_QueuedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_QueuedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_QueuedAt_Field) _Column() string { return "queued_at" }
|
|
|
|
type GracefulExitTransferQueue_RequestedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RequestedAt(v time.Time) GracefulExitTransferQueue_RequestedAt_Field {
|
|
v = toUTC(v)
|
|
return GracefulExitTransferQueue_RequestedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RequestedAt_Raw(v *time.Time) GracefulExitTransferQueue_RequestedAt_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_RequestedAt_Null()
|
|
}
|
|
return GracefulExitTransferQueue_RequestedAt(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RequestedAt_Null() GracefulExitTransferQueue_RequestedAt_Field {
|
|
return GracefulExitTransferQueue_RequestedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_RequestedAt_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_RequestedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_RequestedAt_Field) _Column() string { return "requested_at" }
|
|
|
|
type GracefulExitTransferQueue_LastFailedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedAt(v time.Time) GracefulExitTransferQueue_LastFailedAt_Field {
|
|
v = toUTC(v)
|
|
return GracefulExitTransferQueue_LastFailedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedAt_Raw(v *time.Time) GracefulExitTransferQueue_LastFailedAt_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_LastFailedAt_Null()
|
|
}
|
|
return GracefulExitTransferQueue_LastFailedAt(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedAt_Null() GracefulExitTransferQueue_LastFailedAt_Field {
|
|
return GracefulExitTransferQueue_LastFailedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_LastFailedAt_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_LastFailedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_LastFailedAt_Field) _Column() string { return "last_failed_at" }
|
|
|
|
// GracefulExitTransferQueue_LastFailedCode_Field carries a value destined for
// the nullable last_failed_code column.
type GracefulExitTransferQueue_LastFailedCode_Field struct {
	_set   bool
	_null  bool
	_value *int
}

// GracefulExitTransferQueue_LastFailedCode wraps v as a set, non-null column value.
func GracefulExitTransferQueue_LastFailedCode(v int) GracefulExitTransferQueue_LastFailedCode_Field {
	return GracefulExitTransferQueue_LastFailedCode_Field{_set: true, _value: &v}
}

// GracefulExitTransferQueue_LastFailedCode_Raw maps a possibly-nil pointer
// onto the field: nil becomes SQL NULL, anything else the pointed-to int.
func GracefulExitTransferQueue_LastFailedCode_Raw(v *int) GracefulExitTransferQueue_LastFailedCode_Field {
	if v != nil {
		return GracefulExitTransferQueue_LastFailedCode(*v)
	}
	return GracefulExitTransferQueue_LastFailedCode_Null()
}

// GracefulExitTransferQueue_LastFailedCode_Null builds a field explicitly set to NULL.
func GracefulExitTransferQueue_LastFailedCode_Null() GracefulExitTransferQueue_LastFailedCode_Field {
	return GracefulExitTransferQueue_LastFailedCode_Field{_set: true, _null: true}
}

// isnull reports whether the field would bind as NULL.
func (x GracefulExitTransferQueue_LastFailedCode_Field) isnull() bool {
	return !x._set || x._null || x._value == nil
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_LastFailedCode_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_LastFailedCode_Field) _Column() string { return "last_failed_code" }

// GracefulExitTransferQueue_FailedCount_Field carries a value destined for
// the nullable failed_count column.
type GracefulExitTransferQueue_FailedCount_Field struct {
	_set   bool
	_null  bool
	_value *int
}

// GracefulExitTransferQueue_FailedCount wraps v as a set, non-null column value.
func GracefulExitTransferQueue_FailedCount(v int) GracefulExitTransferQueue_FailedCount_Field {
	return GracefulExitTransferQueue_FailedCount_Field{_set: true, _value: &v}
}

// GracefulExitTransferQueue_FailedCount_Raw maps a possibly-nil pointer onto
// the field: nil becomes SQL NULL, anything else the pointed-to int.
func GracefulExitTransferQueue_FailedCount_Raw(v *int) GracefulExitTransferQueue_FailedCount_Field {
	if v != nil {
		return GracefulExitTransferQueue_FailedCount(*v)
	}
	return GracefulExitTransferQueue_FailedCount_Null()
}

// GracefulExitTransferQueue_FailedCount_Null builds a field explicitly set to NULL.
func GracefulExitTransferQueue_FailedCount_Null() GracefulExitTransferQueue_FailedCount_Field {
	return GracefulExitTransferQueue_FailedCount_Field{_set: true, _null: true}
}

// isnull reports whether the field would bind as NULL.
func (x GracefulExitTransferQueue_FailedCount_Field) isnull() bool {
	return !x._set || x._null || x._value == nil
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_FailedCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_FailedCount_Field) _Column() string { return "failed_count" }
|
|
|
|
type GracefulExitTransferQueue_FinishedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FinishedAt(v time.Time) GracefulExitTransferQueue_FinishedAt_Field {
|
|
v = toUTC(v)
|
|
return GracefulExitTransferQueue_FinishedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FinishedAt_Raw(v *time.Time) GracefulExitTransferQueue_FinishedAt_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_FinishedAt_Null()
|
|
}
|
|
return GracefulExitTransferQueue_FinishedAt(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FinishedAt_Null() GracefulExitTransferQueue_FinishedAt_Field {
|
|
return GracefulExitTransferQueue_FinishedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_FinishedAt_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_FinishedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_FinishedAt_Field) _Column() string { return "finished_at" }
|
|
|
|
// GracefulExitTransferQueue_OrderLimitSendCount_Field carries a value
// destined for the order_limit_send_count column.
type GracefulExitTransferQueue_OrderLimitSendCount_Field struct {
	_set   bool
	_null  bool
	_value int
}

// GracefulExitTransferQueue_OrderLimitSendCount wraps v as a set, non-null column value.
func GracefulExitTransferQueue_OrderLimitSendCount(v int) GracefulExitTransferQueue_OrderLimitSendCount_Field {
	return GracefulExitTransferQueue_OrderLimitSendCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x GracefulExitTransferQueue_OrderLimitSendCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (GracefulExitTransferQueue_OrderLimitSendCount_Field) _Column() string {
	return "order_limit_send_count"
}
|
|
|
|
// Injuredsegment mirrors one row of the injuredsegments table. Attempted is
// a pointer because the column is nullable.
type Injuredsegment struct {
	Path      []byte
	Data      []byte
	Attempted *time.Time // nullable
}

// _Table returns the SQL table name backing Injuredsegment rows.
func (Injuredsegment) _Table() string { return "injuredsegments" }

// Injuredsegment_Create_Fields bundles the optional columns that may be
// supplied when inserting a row; unset fields are omitted.
type Injuredsegment_Create_Fields struct {
	Attempted Injuredsegment_Attempted_Field
}

// Injuredsegment_Update_Fields bundles the columns an update may change;
// unset fields leave the stored value untouched.
type Injuredsegment_Update_Fields struct {
	Attempted Injuredsegment_Attempted_Field
}
|
|
|
|
// Injuredsegment_Path_Field carries a value destined for the path column.
type Injuredsegment_Path_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Injuredsegment_Path wraps v as a set, non-null column value.
func Injuredsegment_Path(v []byte) Injuredsegment_Path_Field {
	return Injuredsegment_Path_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Injuredsegment_Path_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Injuredsegment_Path_Field) _Column() string { return "path" }

// Injuredsegment_Data_Field carries a value destined for the data column.
type Injuredsegment_Data_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Injuredsegment_Data wraps v as a set, non-null column value.
func Injuredsegment_Data(v []byte) Injuredsegment_Data_Field {
	return Injuredsegment_Data_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Injuredsegment_Data_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Injuredsegment_Data_Field) _Column() string { return "data" }
|
|
|
|
type Injuredsegment_Attempted_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Injuredsegment_Attempted(v time.Time) Injuredsegment_Attempted_Field {
|
|
v = toUTC(v)
|
|
return Injuredsegment_Attempted_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Injuredsegment_Attempted_Raw(v *time.Time) Injuredsegment_Attempted_Field {
|
|
if v == nil {
|
|
return Injuredsegment_Attempted_Null()
|
|
}
|
|
return Injuredsegment_Attempted(*v)
|
|
}
|
|
|
|
func Injuredsegment_Attempted_Null() Injuredsegment_Attempted_Field {
|
|
return Injuredsegment_Attempted_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Injuredsegment_Attempted_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Injuredsegment_Attempted_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Injuredsegment_Attempted_Field) _Column() string { return "attempted" }
|
|
|
|
// Irreparabledb mirrors one row of the irreparabledbs table.
type Irreparabledb struct {
	Segmentpath        []byte
	Segmentdetail      []byte
	PiecesLostCount    int64
	SegDamagedUnixSec  int64
	RepairAttemptCount int64
}

// _Table returns the SQL table name backing Irreparabledb rows.
func (Irreparabledb) _Table() string { return "irreparabledbs" }

// Irreparabledb_Update_Fields bundles the columns an update may change;
// unset fields leave the stored value untouched. Segmentpath is not listed,
// so it cannot be changed through an update.
type Irreparabledb_Update_Fields struct {
	Segmentdetail      Irreparabledb_Segmentdetail_Field
	PiecesLostCount    Irreparabledb_PiecesLostCount_Field
	SegDamagedUnixSec  Irreparabledb_SegDamagedUnixSec_Field
	RepairAttemptCount Irreparabledb_RepairAttemptCount_Field
}
|
|
|
|
// Irreparabledb_Segmentpath_Field carries a value destined for the
// segmentpath column.
type Irreparabledb_Segmentpath_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Irreparabledb_Segmentpath wraps v as a set, non-null column value.
func Irreparabledb_Segmentpath(v []byte) Irreparabledb_Segmentpath_Field {
	return Irreparabledb_Segmentpath_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Irreparabledb_Segmentpath_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Irreparabledb_Segmentpath_Field) _Column() string { return "segmentpath" }

// Irreparabledb_Segmentdetail_Field carries a value destined for the
// segmentdetail column.
type Irreparabledb_Segmentdetail_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Irreparabledb_Segmentdetail wraps v as a set, non-null column value.
func Irreparabledb_Segmentdetail(v []byte) Irreparabledb_Segmentdetail_Field {
	return Irreparabledb_Segmentdetail_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Irreparabledb_Segmentdetail_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Irreparabledb_Segmentdetail_Field) _Column() string { return "segmentdetail" }

// Irreparabledb_PiecesLostCount_Field carries a value destined for the
// pieces_lost_count column.
type Irreparabledb_PiecesLostCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_PiecesLostCount wraps v as a set, non-null column value.
func Irreparabledb_PiecesLostCount(v int64) Irreparabledb_PiecesLostCount_Field {
	return Irreparabledb_PiecesLostCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Irreparabledb_PiecesLostCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Irreparabledb_PiecesLostCount_Field) _Column() string { return "pieces_lost_count" }

// Irreparabledb_SegDamagedUnixSec_Field carries a value destined for the
// seg_damaged_unix_sec column.
type Irreparabledb_SegDamagedUnixSec_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_SegDamagedUnixSec wraps v as a set, non-null column value.
func Irreparabledb_SegDamagedUnixSec(v int64) Irreparabledb_SegDamagedUnixSec_Field {
	return Irreparabledb_SegDamagedUnixSec_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Irreparabledb_SegDamagedUnixSec_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Irreparabledb_SegDamagedUnixSec_Field) _Column() string { return "seg_damaged_unix_sec" }

// Irreparabledb_RepairAttemptCount_Field carries a value destined for the
// repair_attempt_count column.
type Irreparabledb_RepairAttemptCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_RepairAttemptCount wraps v as a set, non-null column value.
func Irreparabledb_RepairAttemptCount(v int64) Irreparabledb_RepairAttemptCount_Field {
	return Irreparabledb_RepairAttemptCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Irreparabledb_RepairAttemptCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Irreparabledb_RepairAttemptCount_Field) _Column() string { return "repair_attempt_count" }
|
|
|
|
// Node mirrors one row of the nodes table. Pointer-typed fields correspond
// to nullable columns.
type Node struct {
	Id                    []byte
	Address               string
	LastNet               string
	Protocol              int
	Type                  int
	Email                 string
	Wallet                string
	FreeBandwidth         int64
	FreeDisk              int64
	PieceCount            int64
	Major                 int64
	Minor                 int64
	Patch                 int64
	Hash                  string
	Timestamp             time.Time
	Release               bool
	Latency90             int64
	AuditSuccessCount     int64
	TotalAuditCount       int64
	UptimeSuccessCount    int64
	TotalUptimeCount      int64
	CreatedAt             time.Time
	UpdatedAt             time.Time
	LastContactSuccess    time.Time
	LastContactFailure    time.Time
	Contained             bool
	Disqualified          *time.Time // nullable
	AuditReputationAlpha  float64
	AuditReputationBeta   float64
	UptimeReputationAlpha float64
	UptimeReputationBeta  float64
	ExitInitiatedAt       *time.Time // nullable
	ExitLoopCompletedAt   *time.Time // nullable
	ExitFinishedAt        *time.Time // nullable
	ExitSuccess           bool
}

// _Table returns the SQL table name backing Node rows.
func (Node) _Table() string { return "nodes" }

// Node_Create_Fields bundles the optional (nullable) columns that may be
// supplied when inserting a row; unset fields are omitted.
type Node_Create_Fields struct {
	Disqualified        Node_Disqualified_Field
	ExitInitiatedAt     Node_ExitInitiatedAt_Field
	ExitLoopCompletedAt Node_ExitLoopCompletedAt_Field
	ExitFinishedAt      Node_ExitFinishedAt_Field
}

// Node_Update_Fields bundles the columns an update may change; unset fields
// leave the stored value untouched. Id, CreatedAt, and UpdatedAt are not
// listed here, so they cannot be changed through this struct.
type Node_Update_Fields struct {
	Address               Node_Address_Field
	LastNet               Node_LastNet_Field
	Protocol              Node_Protocol_Field
	Type                  Node_Type_Field
	Email                 Node_Email_Field
	Wallet                Node_Wallet_Field
	FreeBandwidth         Node_FreeBandwidth_Field
	FreeDisk              Node_FreeDisk_Field
	PieceCount            Node_PieceCount_Field
	Major                 Node_Major_Field
	Minor                 Node_Minor_Field
	Patch                 Node_Patch_Field
	Hash                  Node_Hash_Field
	Timestamp             Node_Timestamp_Field
	Release               Node_Release_Field
	Latency90             Node_Latency90_Field
	AuditSuccessCount     Node_AuditSuccessCount_Field
	TotalAuditCount       Node_TotalAuditCount_Field
	UptimeSuccessCount    Node_UptimeSuccessCount_Field
	TotalUptimeCount      Node_TotalUptimeCount_Field
	LastContactSuccess    Node_LastContactSuccess_Field
	LastContactFailure    Node_LastContactFailure_Field
	Contained             Node_Contained_Field
	Disqualified          Node_Disqualified_Field
	AuditReputationAlpha  Node_AuditReputationAlpha_Field
	AuditReputationBeta   Node_AuditReputationBeta_Field
	UptimeReputationAlpha Node_UptimeReputationAlpha_Field
	UptimeReputationBeta  Node_UptimeReputationBeta_Field
	ExitInitiatedAt       Node_ExitInitiatedAt_Field
	ExitLoopCompletedAt   Node_ExitLoopCompletedAt_Field
	ExitFinishedAt        Node_ExitFinishedAt_Field
	ExitSuccess           Node_ExitSuccess_Field
}
|
|
|
|
// Node_Id_Field carries a value destined for the id column.
type Node_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Node_Id wraps v as a set, non-null column value.
func Node_Id(v []byte) Node_Id_Field { return Node_Id_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Id_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Id_Field) _Column() string { return "id" }

// Node_Address_Field carries a value destined for the address column.
type Node_Address_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Address wraps v as a set, non-null column value.
func Node_Address(v string) Node_Address_Field { return Node_Address_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Address_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Address_Field) _Column() string { return "address" }

// Node_LastNet_Field carries a value destined for the last_net column.
type Node_LastNet_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_LastNet wraps v as a set, non-null column value.
func Node_LastNet(v string) Node_LastNet_Field { return Node_LastNet_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_LastNet_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_LastNet_Field) _Column() string { return "last_net" }

// Node_Protocol_Field carries a value destined for the protocol column.
type Node_Protocol_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Node_Protocol wraps v as a set, non-null column value.
func Node_Protocol(v int) Node_Protocol_Field { return Node_Protocol_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Protocol_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Protocol_Field) _Column() string { return "protocol" }

// Node_Type_Field carries a value destined for the type column.
type Node_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Node_Type wraps v as a set, non-null column value.
func Node_Type(v int) Node_Type_Field { return Node_Type_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Type_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Type_Field) _Column() string { return "type" }

// Node_Email_Field carries a value destined for the email column.
type Node_Email_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Email wraps v as a set, non-null column value.
func Node_Email(v string) Node_Email_Field { return Node_Email_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Email_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Email_Field) _Column() string { return "email" }

// Node_Wallet_Field carries a value destined for the wallet column.
type Node_Wallet_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Wallet wraps v as a set, non-null column value.
func Node_Wallet(v string) Node_Wallet_Field { return Node_Wallet_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Wallet_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Wallet_Field) _Column() string { return "wallet" }
|
|
|
|
// Node_FreeBandwidth_Field carries a value destined for the free_bandwidth column.
type Node_FreeBandwidth_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_FreeBandwidth wraps v as a set, non-null column value.
func Node_FreeBandwidth(v int64) Node_FreeBandwidth_Field {
	return Node_FreeBandwidth_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_FreeBandwidth_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_FreeBandwidth_Field) _Column() string { return "free_bandwidth" }

// Node_FreeDisk_Field carries a value destined for the free_disk column.
type Node_FreeDisk_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_FreeDisk wraps v as a set, non-null column value.
func Node_FreeDisk(v int64) Node_FreeDisk_Field { return Node_FreeDisk_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_FreeDisk_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_FreeDisk_Field) _Column() string { return "free_disk" }

// Node_PieceCount_Field carries a value destined for the piece_count column.
type Node_PieceCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_PieceCount wraps v as a set, non-null column value.
func Node_PieceCount(v int64) Node_PieceCount_Field {
	return Node_PieceCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_PieceCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_PieceCount_Field) _Column() string { return "piece_count" }

// Node_Major_Field carries a value destined for the major column.
type Node_Major_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Major wraps v as a set, non-null column value.
func Node_Major(v int64) Node_Major_Field { return Node_Major_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Major_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Major_Field) _Column() string { return "major" }

// Node_Minor_Field carries a value destined for the minor column.
type Node_Minor_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Minor wraps v as a set, non-null column value.
func Node_Minor(v int64) Node_Minor_Field { return Node_Minor_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Minor_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Minor_Field) _Column() string { return "minor" }

// Node_Patch_Field carries a value destined for the patch column.
type Node_Patch_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Patch wraps v as a set, non-null column value.
func Node_Patch(v int64) Node_Patch_Field { return Node_Patch_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Patch_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Patch_Field) _Column() string { return "patch" }
|
|
|
|
// Node_Hash_Field carries a value destined for the hash column.
type Node_Hash_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Hash wraps v as a set, non-null column value.
func Node_Hash(v string) Node_Hash_Field { return Node_Hash_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Hash_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Hash_Field) _Column() string { return "hash" }

// Node_Timestamp_Field carries a value destined for the timestamp column.
// NOTE(review): unlike other time fields in this file, the constructor does
// not pass v through toUTC.
type Node_Timestamp_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_Timestamp wraps v as a set, non-null column value.
func Node_Timestamp(v time.Time) Node_Timestamp_Field {
	return Node_Timestamp_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Timestamp_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Timestamp_Field) _Column() string { return "timestamp" }

// Node_Release_Field carries a value destined for the release column.
type Node_Release_Field struct {
	_set   bool
	_null  bool
	_value bool
}

// Node_Release wraps v as a set, non-null column value.
func Node_Release(v bool) Node_Release_Field { return Node_Release_Field{_set: true, _value: v} }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Release_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Release_Field) _Column() string { return "release" }

// Node_Latency90_Field carries a value destined for the latency_90 column.
type Node_Latency90_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Latency90 wraps v as a set, non-null column value.
func Node_Latency90(v int64) Node_Latency90_Field {
	return Node_Latency90_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Latency90_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Latency90_Field) _Column() string { return "latency_90" }
|
|
|
|
// Node_AuditSuccessCount_Field carries a value destined for the
// audit_success_count column.
type Node_AuditSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_AuditSuccessCount wraps v as a set, non-null column value.
func Node_AuditSuccessCount(v int64) Node_AuditSuccessCount_Field {
	return Node_AuditSuccessCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_AuditSuccessCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_AuditSuccessCount_Field) _Column() string { return "audit_success_count" }

// Node_TotalAuditCount_Field carries a value destined for the
// total_audit_count column.
type Node_TotalAuditCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_TotalAuditCount wraps v as a set, non-null column value.
func Node_TotalAuditCount(v int64) Node_TotalAuditCount_Field {
	return Node_TotalAuditCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_TotalAuditCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_TotalAuditCount_Field) _Column() string { return "total_audit_count" }

// Node_UptimeSuccessCount_Field carries a value destined for the
// uptime_success_count column.
type Node_UptimeSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_UptimeSuccessCount wraps v as a set, non-null column value.
func Node_UptimeSuccessCount(v int64) Node_UptimeSuccessCount_Field {
	return Node_UptimeSuccessCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_UptimeSuccessCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_UptimeSuccessCount_Field) _Column() string { return "uptime_success_count" }

// Node_TotalUptimeCount_Field carries a value destined for the
// total_uptime_count column.
type Node_TotalUptimeCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_TotalUptimeCount wraps v as a set, non-null column value.
func Node_TotalUptimeCount(v int64) Node_TotalUptimeCount_Field {
	return Node_TotalUptimeCount_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_TotalUptimeCount_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_TotalUptimeCount_Field) _Column() string { return "total_uptime_count" }
|
|
|
|
// Node_CreatedAt_Field carries a value destined for the created_at column.
// NOTE(review): the constructor does not pass v through toUTC, unlike the
// graceful-exit time fields earlier in this file.
type Node_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_CreatedAt wraps v as a set, non-null column value.
func Node_CreatedAt(v time.Time) Node_CreatedAt_Field {
	return Node_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_CreatedAt_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_CreatedAt_Field) _Column() string { return "created_at" }

// Node_UpdatedAt_Field carries a value destined for the updated_at column.
type Node_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_UpdatedAt wraps v as a set, non-null column value.
func Node_UpdatedAt(v time.Time) Node_UpdatedAt_Field {
	return Node_UpdatedAt_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_UpdatedAt_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_UpdatedAt_Field) _Column() string { return "updated_at" }

// Node_LastContactSuccess_Field carries a value destined for the
// last_contact_success column.
type Node_LastContactSuccess_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_LastContactSuccess wraps v as a set, non-null column value.
func Node_LastContactSuccess(v time.Time) Node_LastContactSuccess_Field {
	return Node_LastContactSuccess_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_LastContactSuccess_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_LastContactSuccess_Field) _Column() string { return "last_contact_success" }

// Node_LastContactFailure_Field carries a value destined for the
// last_contact_failure column.
type Node_LastContactFailure_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_LastContactFailure wraps v as a set, non-null column value.
func Node_LastContactFailure(v time.Time) Node_LastContactFailure_Field {
	return Node_LastContactFailure_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_LastContactFailure_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_LastContactFailure_Field) _Column() string { return "last_contact_failure" }
|
|
|
|
// Node_Contained_Field carries a value destined for the contained column.
type Node_Contained_Field struct {
	_set   bool
	_null  bool
	_value bool
}

// Node_Contained wraps v as a set, non-null column value.
func Node_Contained(v bool) Node_Contained_Field {
	return Node_Contained_Field{_set: true, _value: v}
}

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Contained_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Contained_Field) _Column() string { return "contained" }

// Node_Disqualified_Field carries a value destined for the nullable
// disqualified column.
type Node_Disqualified_Field struct {
	_set   bool
	_null  bool
	_value *time.Time
}

// Node_Disqualified wraps v as a set, non-null column value.
func Node_Disqualified(v time.Time) Node_Disqualified_Field {
	return Node_Disqualified_Field{_set: true, _value: &v}
}

// Node_Disqualified_Raw maps a possibly-nil pointer onto the field: nil
// becomes SQL NULL, anything else the pointed-to time.
func Node_Disqualified_Raw(v *time.Time) Node_Disqualified_Field {
	if v != nil {
		return Node_Disqualified(*v)
	}
	return Node_Disqualified_Null()
}

// Node_Disqualified_Null builds a field explicitly set to NULL.
func Node_Disqualified_Null() Node_Disqualified_Field {
	return Node_Disqualified_Field{_set: true, _null: true}
}

// isnull reports whether the field would bind as NULL.
func (x Node_Disqualified_Field) isnull() bool { return !x._set || x._null || x._value == nil }

// value returns the SQL bind argument: the wrapped value, or nil when the
// field is unset or NULL.
func (x Node_Disqualified_Field) value() interface{} {
	if x._set && !x._null {
		return x._value
	}
	return nil
}

func (Node_Disqualified_Field) _Column() string { return "disqualified" }
|
|
|
|
// Node_AuditReputationAlpha_Field wraps a value destined for the
// audit_reputation_alpha column of the nodes table.
type Node_AuditReputationAlpha_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_AuditReputationAlpha constructs a set, non-null field holding v.
func Node_AuditReputationAlpha(v float64) Node_AuditReputationAlpha_Field {
	var f Node_AuditReputationAlpha_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Node_AuditReputationAlpha_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_AuditReputationAlpha_Field) _Column() string { return "audit_reputation_alpha" }

// Node_AuditReputationBeta_Field wraps a value destined for the
// audit_reputation_beta column of the nodes table.
type Node_AuditReputationBeta_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_AuditReputationBeta constructs a set, non-null field holding v.
func Node_AuditReputationBeta(v float64) Node_AuditReputationBeta_Field {
	var f Node_AuditReputationBeta_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Node_AuditReputationBeta_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_AuditReputationBeta_Field) _Column() string { return "audit_reputation_beta" }

// Node_UptimeReputationAlpha_Field wraps a value destined for the
// uptime_reputation_alpha column of the nodes table.
type Node_UptimeReputationAlpha_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_UptimeReputationAlpha constructs a set, non-null field holding v.
func Node_UptimeReputationAlpha(v float64) Node_UptimeReputationAlpha_Field {
	var f Node_UptimeReputationAlpha_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Node_UptimeReputationAlpha_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_UptimeReputationAlpha_Field) _Column() string { return "uptime_reputation_alpha" }

// Node_UptimeReputationBeta_Field wraps a value destined for the
// uptime_reputation_beta column of the nodes table.
type Node_UptimeReputationBeta_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_UptimeReputationBeta constructs a set, non-null field holding v.
func Node_UptimeReputationBeta(v float64) Node_UptimeReputationBeta_Field {
	var f Node_UptimeReputationBeta_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Node_UptimeReputationBeta_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_UptimeReputationBeta_Field) _Column() string { return "uptime_reputation_beta" }
|
|
|
|
type Node_ExitInitiatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_ExitInitiatedAt(v time.Time) Node_ExitInitiatedAt_Field {
|
|
v = toUTC(v)
|
|
return Node_ExitInitiatedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_ExitInitiatedAt_Raw(v *time.Time) Node_ExitInitiatedAt_Field {
|
|
if v == nil {
|
|
return Node_ExitInitiatedAt_Null()
|
|
}
|
|
return Node_ExitInitiatedAt(*v)
|
|
}
|
|
|
|
func Node_ExitInitiatedAt_Null() Node_ExitInitiatedAt_Field {
|
|
return Node_ExitInitiatedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_ExitInitiatedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_ExitInitiatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitInitiatedAt_Field) _Column() string { return "exit_initiated_at" }
|
|
|
|
type Node_ExitLoopCompletedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_ExitLoopCompletedAt(v time.Time) Node_ExitLoopCompletedAt_Field {
|
|
v = toUTC(v)
|
|
return Node_ExitLoopCompletedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_ExitLoopCompletedAt_Raw(v *time.Time) Node_ExitLoopCompletedAt_Field {
|
|
if v == nil {
|
|
return Node_ExitLoopCompletedAt_Null()
|
|
}
|
|
return Node_ExitLoopCompletedAt(*v)
|
|
}
|
|
|
|
func Node_ExitLoopCompletedAt_Null() Node_ExitLoopCompletedAt_Field {
|
|
return Node_ExitLoopCompletedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_ExitLoopCompletedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_ExitLoopCompletedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitLoopCompletedAt_Field) _Column() string { return "exit_loop_completed_at" }
|
|
|
|
type Node_ExitFinishedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_ExitFinishedAt(v time.Time) Node_ExitFinishedAt_Field {
|
|
v = toUTC(v)
|
|
return Node_ExitFinishedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_ExitFinishedAt_Raw(v *time.Time) Node_ExitFinishedAt_Field {
|
|
if v == nil {
|
|
return Node_ExitFinishedAt_Null()
|
|
}
|
|
return Node_ExitFinishedAt(*v)
|
|
}
|
|
|
|
func Node_ExitFinishedAt_Null() Node_ExitFinishedAt_Field {
|
|
return Node_ExitFinishedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_ExitFinishedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_ExitFinishedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitFinishedAt_Field) _Column() string { return "exit_finished_at" }
|
|
|
|
// Node_ExitSuccess_Field wraps a value destined for the exit_success
// column of the nodes table.
type Node_ExitSuccess_Field struct {
	_set   bool
	_null  bool
	_value bool
}

// Node_ExitSuccess constructs a set, non-null field holding v.
func Node_ExitSuccess(v bool) Node_ExitSuccess_Field {
	var f Node_ExitSuccess_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Node_ExitSuccess_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_ExitSuccess_Field) _Column() string { return "exit_success" }
|
|
|
|
// NodesOfflineTime mirrors a row of the "nodes_offline_times" table,
// recording a span of seconds during which a node was offline.
type NodesOfflineTime struct {
	NodeId    []byte    // node_id column
	TrackedAt time.Time // tracked_at column
	Seconds   int       // seconds column
}

// _Table reports the database table backing NodesOfflineTime.
func (NodesOfflineTime) _Table() string { return "nodes_offline_times" }

// NodesOfflineTime_Update_Fields is empty: the generated API exposes no
// updatable columns for this table.
type NodesOfflineTime_Update_Fields struct {
}
|
|
|
|
// NodesOfflineTime_NodeId_Field wraps a value destined for the node_id
// column of the nodes_offline_times table.
type NodesOfflineTime_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// NodesOfflineTime_NodeId constructs a set, non-null field holding v.
func NodesOfflineTime_NodeId(v []byte) NodesOfflineTime_NodeId_Field {
	var f NodesOfflineTime_NodeId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f NodesOfflineTime_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (NodesOfflineTime_NodeId_Field) _Column() string { return "node_id" }

// NodesOfflineTime_TrackedAt_Field wraps a value destined for the
// tracked_at column of the nodes_offline_times table.
type NodesOfflineTime_TrackedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// NodesOfflineTime_TrackedAt constructs a set, non-null field holding v.
func NodesOfflineTime_TrackedAt(v time.Time) NodesOfflineTime_TrackedAt_Field {
	var f NodesOfflineTime_TrackedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f NodesOfflineTime_TrackedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (NodesOfflineTime_TrackedAt_Field) _Column() string { return "tracked_at" }

// NodesOfflineTime_Seconds_Field wraps a value destined for the seconds
// column of the nodes_offline_times table.
type NodesOfflineTime_Seconds_Field struct {
	_set   bool
	_null  bool
	_value int
}

// NodesOfflineTime_Seconds constructs a set, non-null field holding v.
func NodesOfflineTime_Seconds(v int) NodesOfflineTime_Seconds_Field {
	var f NodesOfflineTime_Seconds_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f NodesOfflineTime_Seconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (NodesOfflineTime_Seconds_Field) _Column() string { return "seconds" }
|
|
|
|
// Offer mirrors a row of the "offers" table. Credit amounts are stored
// in cents; the *int duration/cap fields are nullable columns.
type Offer struct {
	Id                        int
	Name                      string
	Description               string
	AwardCreditInCents        int
	InviteeCreditInCents      int
	AwardCreditDurationDays   *int // nullable
	InviteeCreditDurationDays *int // nullable
	RedeemableCap             *int // nullable
	ExpiresAt                 time.Time
	CreatedAt                 time.Time
	Status                    int
	Type                      int
}

// _Table reports the database table backing Offer.
func (Offer) _Table() string { return "offers" }

// Offer_Create_Fields lists the optional (nullable) columns that may be
// supplied when inserting an Offer row.
type Offer_Create_Fields struct {
	AwardCreditDurationDays   Offer_AwardCreditDurationDays_Field
	InviteeCreditDurationDays Offer_InviteeCreditDurationDays_Field
	RedeemableCap             Offer_RedeemableCap_Field
}

// Offer_Update_Fields lists the columns the generated API allows an
// update statement to change.
type Offer_Update_Fields struct {
	Name                      Offer_Name_Field
	Description               Offer_Description_Field
	AwardCreditInCents        Offer_AwardCreditInCents_Field
	InviteeCreditInCents      Offer_InviteeCreditInCents_Field
	AwardCreditDurationDays   Offer_AwardCreditDurationDays_Field
	InviteeCreditDurationDays Offer_InviteeCreditDurationDays_Field
	RedeemableCap             Offer_RedeemableCap_Field
	ExpiresAt                 Offer_ExpiresAt_Field
	Status                    Offer_Status_Field
	Type                      Offer_Type_Field
}
|
|
|
|
// Offer_Id_Field wraps a value destined for the id column of the
// offers table.
type Offer_Id_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Offer_Id constructs a set, non-null field holding v.
func Offer_Id(v int) Offer_Id_Field {
	var f Offer_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_Id_Field) _Column() string { return "id" }

// Offer_Name_Field wraps a value destined for the name column of the
// offers table.
type Offer_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Offer_Name constructs a set, non-null field holding v.
func Offer_Name(v string) Offer_Name_Field {
	var f Offer_Name_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_Name_Field) _Column() string { return "name" }

// Offer_Description_Field wraps a value destined for the description
// column of the offers table.
type Offer_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Offer_Description constructs a set, non-null field holding v.
func Offer_Description(v string) Offer_Description_Field {
	var f Offer_Description_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_Description_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_Description_Field) _Column() string { return "description" }

// Offer_AwardCreditInCents_Field wraps a value destined for the
// award_credit_in_cents column of the offers table.
type Offer_AwardCreditInCents_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Offer_AwardCreditInCents constructs a set, non-null field holding v.
func Offer_AwardCreditInCents(v int) Offer_AwardCreditInCents_Field {
	var f Offer_AwardCreditInCents_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_AwardCreditInCents_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_AwardCreditInCents_Field) _Column() string { return "award_credit_in_cents" }

// Offer_InviteeCreditInCents_Field wraps a value destined for the
// invitee_credit_in_cents column of the offers table.
type Offer_InviteeCreditInCents_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Offer_InviteeCreditInCents constructs a set, non-null field holding v.
func Offer_InviteeCreditInCents(v int) Offer_InviteeCreditInCents_Field {
	var f Offer_InviteeCreditInCents_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_InviteeCreditInCents_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_InviteeCreditInCents_Field) _Column() string { return "invitee_credit_in_cents" }
|
|
|
|
// Offer_AwardCreditDurationDays_Field wraps a nullable value destined
// for the award_credit_duration_days column of the offers table.
type Offer_AwardCreditDurationDays_Field struct {
	_set   bool
	_null  bool
	_value *int
}

// Offer_AwardCreditDurationDays constructs a set, non-null field
// pointing at a copy of v.
func Offer_AwardCreditDurationDays(v int) Offer_AwardCreditDurationDays_Field {
	var f Offer_AwardCreditDurationDays_Field
	f._set = true
	f._value = &v
	return f
}

// Offer_AwardCreditDurationDays_Raw converts a possibly-nil pointer
// into a field: nil maps to SQL NULL, anything else to a regular value.
func Offer_AwardCreditDurationDays_Raw(v *int) Offer_AwardCreditDurationDays_Field {
	if v != nil {
		return Offer_AwardCreditDurationDays(*v)
	}
	return Offer_AwardCreditDurationDays_Null()
}

// Offer_AwardCreditDurationDays_Null produces a field representing SQL NULL.
func Offer_AwardCreditDurationDays_Null() Offer_AwardCreditDurationDays_Field {
	var f Offer_AwardCreditDurationDays_Field
	f._set = true
	f._null = true
	return f
}

// isnull reports whether the field would be stored as SQL NULL.
func (f Offer_AwardCreditDurationDays_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_AwardCreditDurationDays_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_AwardCreditDurationDays_Field) _Column() string { return "award_credit_duration_days" }

// Offer_InviteeCreditDurationDays_Field wraps a nullable value destined
// for the invitee_credit_duration_days column of the offers table.
type Offer_InviteeCreditDurationDays_Field struct {
	_set   bool
	_null  bool
	_value *int
}

// Offer_InviteeCreditDurationDays constructs a set, non-null field
// pointing at a copy of v.
func Offer_InviteeCreditDurationDays(v int) Offer_InviteeCreditDurationDays_Field {
	var f Offer_InviteeCreditDurationDays_Field
	f._set = true
	f._value = &v
	return f
}

// Offer_InviteeCreditDurationDays_Raw converts a possibly-nil pointer
// into a field: nil maps to SQL NULL, anything else to a regular value.
func Offer_InviteeCreditDurationDays_Raw(v *int) Offer_InviteeCreditDurationDays_Field {
	if v != nil {
		return Offer_InviteeCreditDurationDays(*v)
	}
	return Offer_InviteeCreditDurationDays_Null()
}

// Offer_InviteeCreditDurationDays_Null produces a field representing SQL NULL.
func Offer_InviteeCreditDurationDays_Null() Offer_InviteeCreditDurationDays_Field {
	var f Offer_InviteeCreditDurationDays_Field
	f._set = true
	f._null = true
	return f
}

// isnull reports whether the field would be stored as SQL NULL.
func (f Offer_InviteeCreditDurationDays_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_InviteeCreditDurationDays_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_InviteeCreditDurationDays_Field) _Column() string { return "invitee_credit_duration_days" }

// Offer_RedeemableCap_Field wraps a nullable value destined for the
// redeemable_cap column of the offers table.
type Offer_RedeemableCap_Field struct {
	_set   bool
	_null  bool
	_value *int
}

// Offer_RedeemableCap constructs a set, non-null field pointing at a copy of v.
func Offer_RedeemableCap(v int) Offer_RedeemableCap_Field {
	var f Offer_RedeemableCap_Field
	f._set = true
	f._value = &v
	return f
}

// Offer_RedeemableCap_Raw converts a possibly-nil pointer into a field:
// nil maps to SQL NULL, anything else to a regular value.
func Offer_RedeemableCap_Raw(v *int) Offer_RedeemableCap_Field {
	if v != nil {
		return Offer_RedeemableCap(*v)
	}
	return Offer_RedeemableCap_Null()
}

// Offer_RedeemableCap_Null produces a field representing SQL NULL.
func Offer_RedeemableCap_Null() Offer_RedeemableCap_Field {
	var f Offer_RedeemableCap_Field
	f._set = true
	f._null = true
	return f
}

// isnull reports whether the field would be stored as SQL NULL.
func (f Offer_RedeemableCap_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_RedeemableCap_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_RedeemableCap_Field) _Column() string { return "redeemable_cap" }
|
|
|
|
// Offer_ExpiresAt_Field wraps a value destined for the expires_at
// column of the offers table.
type Offer_ExpiresAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Offer_ExpiresAt constructs a set, non-null field holding v.
func Offer_ExpiresAt(v time.Time) Offer_ExpiresAt_Field {
	var f Offer_ExpiresAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_ExpiresAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_ExpiresAt_Field) _Column() string { return "expires_at" }

// Offer_CreatedAt_Field wraps a value destined for the created_at
// column of the offers table.
type Offer_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Offer_CreatedAt constructs a set, non-null field holding v.
func Offer_CreatedAt(v time.Time) Offer_CreatedAt_Field {
	var f Offer_CreatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_CreatedAt_Field) _Column() string { return "created_at" }

// Offer_Status_Field wraps a value destined for the status column of
// the offers table.
type Offer_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Offer_Status constructs a set, non-null field holding v.
func Offer_Status(v int) Offer_Status_Field {
	var f Offer_Status_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_Status_Field) _Column() string { return "status" }

// Offer_Type_Field wraps a value destined for the type column of the
// offers table.
type Offer_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Offer_Type constructs a set, non-null field holding v.
func Offer_Type(v int) Offer_Type_Field {
	var f Offer_Type_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Offer_Type_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Offer_Type_Field) _Column() string { return "type" }
|
|
|
|
// PeerIdentity mirrors a row of the "peer_identities" table, storing a
// node's certificate chain keyed by node id.
type PeerIdentity struct {
	NodeId           []byte    // node_id column
	LeafSerialNumber []byte    // leaf_serial_number column
	Chain            []byte    // chain column
	UpdatedAt        time.Time // updated_at column
}

// _Table reports the database table backing PeerIdentity.
func (PeerIdentity) _Table() string { return "peer_identities" }

// PeerIdentity_Update_Fields lists the columns the generated API allows
// an update statement to change.
type PeerIdentity_Update_Fields struct {
	LeafSerialNumber PeerIdentity_LeafSerialNumber_Field
	Chain            PeerIdentity_Chain_Field
}
|
|
|
|
// PeerIdentity_NodeId_Field wraps a value destined for the node_id
// column of the peer_identities table.
type PeerIdentity_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PeerIdentity_NodeId constructs a set, non-null field holding v.
func PeerIdentity_NodeId(v []byte) PeerIdentity_NodeId_Field {
	var f PeerIdentity_NodeId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PeerIdentity_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PeerIdentity_NodeId_Field) _Column() string { return "node_id" }

// PeerIdentity_LeafSerialNumber_Field wraps a value destined for the
// leaf_serial_number column of the peer_identities table.
type PeerIdentity_LeafSerialNumber_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PeerIdentity_LeafSerialNumber constructs a set, non-null field holding v.
func PeerIdentity_LeafSerialNumber(v []byte) PeerIdentity_LeafSerialNumber_Field {
	var f PeerIdentity_LeafSerialNumber_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PeerIdentity_LeafSerialNumber_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PeerIdentity_LeafSerialNumber_Field) _Column() string { return "leaf_serial_number" }

// PeerIdentity_Chain_Field wraps a value destined for the chain column
// of the peer_identities table.
type PeerIdentity_Chain_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PeerIdentity_Chain constructs a set, non-null field holding v.
func PeerIdentity_Chain(v []byte) PeerIdentity_Chain_Field {
	var f PeerIdentity_Chain_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PeerIdentity_Chain_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PeerIdentity_Chain_Field) _Column() string { return "chain" }

// PeerIdentity_UpdatedAt_Field wraps a value destined for the
// updated_at column of the peer_identities table.
type PeerIdentity_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// PeerIdentity_UpdatedAt constructs a set, non-null field holding v.
func PeerIdentity_UpdatedAt(v time.Time) PeerIdentity_UpdatedAt_Field {
	var f PeerIdentity_UpdatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PeerIdentity_UpdatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PeerIdentity_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// PendingAudits mirrors a row of the "pending_audits" table: an audit
// awaiting re-verification against a specific node and piece.
type PendingAudits struct {
	NodeId            []byte // node_id column
	PieceId           []byte // piece_id column
	StripeIndex       int64  // stripe_index column
	ShareSize         int64  // share_size column
	ExpectedShareHash []byte // expected_share_hash column
	ReverifyCount     int64  // reverify_count column
	Path              []byte // path column
}

// _Table reports the database table backing PendingAudits.
func (PendingAudits) _Table() string { return "pending_audits" }

// PendingAudits_Update_Fields lists the columns the generated API
// allows an update statement to change.
type PendingAudits_Update_Fields struct {
	ReverifyCount PendingAudits_ReverifyCount_Field
}
|
|
|
|
// PendingAudits_NodeId_Field wraps a value destined for the node_id
// column of the pending_audits table.
type PendingAudits_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_NodeId constructs a set, non-null field holding v.
func PendingAudits_NodeId(v []byte) PendingAudits_NodeId_Field {
	var f PendingAudits_NodeId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_NodeId_Field) _Column() string { return "node_id" }

// PendingAudits_PieceId_Field wraps a value destined for the piece_id
// column of the pending_audits table.
type PendingAudits_PieceId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_PieceId constructs a set, non-null field holding v.
func PendingAudits_PieceId(v []byte) PendingAudits_PieceId_Field {
	var f PendingAudits_PieceId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_PieceId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_PieceId_Field) _Column() string { return "piece_id" }

// PendingAudits_StripeIndex_Field wraps a value destined for the
// stripe_index column of the pending_audits table.
type PendingAudits_StripeIndex_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// PendingAudits_StripeIndex constructs a set, non-null field holding v.
func PendingAudits_StripeIndex(v int64) PendingAudits_StripeIndex_Field {
	var f PendingAudits_StripeIndex_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_StripeIndex_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_StripeIndex_Field) _Column() string { return "stripe_index" }

// PendingAudits_ShareSize_Field wraps a value destined for the
// share_size column of the pending_audits table.
type PendingAudits_ShareSize_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// PendingAudits_ShareSize constructs a set, non-null field holding v.
func PendingAudits_ShareSize(v int64) PendingAudits_ShareSize_Field {
	var f PendingAudits_ShareSize_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_ShareSize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_ShareSize_Field) _Column() string { return "share_size" }

// PendingAudits_ExpectedShareHash_Field wraps a value destined for the
// expected_share_hash column of the pending_audits table.
type PendingAudits_ExpectedShareHash_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_ExpectedShareHash constructs a set, non-null field holding v.
func PendingAudits_ExpectedShareHash(v []byte) PendingAudits_ExpectedShareHash_Field {
	var f PendingAudits_ExpectedShareHash_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_ExpectedShareHash_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_ExpectedShareHash_Field) _Column() string { return "expected_share_hash" }

// PendingAudits_ReverifyCount_Field wraps a value destined for the
// reverify_count column of the pending_audits table.
type PendingAudits_ReverifyCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// PendingAudits_ReverifyCount constructs a set, non-null field holding v.
func PendingAudits_ReverifyCount(v int64) PendingAudits_ReverifyCount_Field {
	var f PendingAudits_ReverifyCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_ReverifyCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_ReverifyCount_Field) _Column() string { return "reverify_count" }

// PendingAudits_Path_Field wraps a value destined for the path column
// of the pending_audits table.
type PendingAudits_Path_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_Path constructs a set, non-null field holding v.
func PendingAudits_Path(v []byte) PendingAudits_Path_Field {
	var f PendingAudits_Path_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f PendingAudits_Path_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (PendingAudits_Path_Field) _Column() string { return "path" }
|
|
|
|
// Project mirrors a row of the "projects" table.
type Project struct {
	Id          []byte    // id column
	Name        string    // name column
	Description string    // description column
	UsageLimit  int64     // usage_limit column
	PartnerId   []byte    // partner_id column (nullable)
	OwnerId     []byte    // owner_id column
	CreatedAt   time.Time // created_at column
}

// _Table reports the database table backing Project.
func (Project) _Table() string { return "projects" }

// Project_Create_Fields lists the optional (nullable) columns that may
// be supplied when inserting a Project row.
type Project_Create_Fields struct {
	PartnerId Project_PartnerId_Field
}

// Project_Update_Fields lists the columns the generated API allows an
// update statement to change.
type Project_Update_Fields struct {
	Description Project_Description_Field
	UsageLimit  Project_UsageLimit_Field
}
|
|
|
|
// Project_Id_Field wraps a value destined for the id column of the
// projects table.
type Project_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Project_Id constructs a set, non-null field holding v.
func Project_Id(v []byte) Project_Id_Field {
	var f Project_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_Id_Field) _Column() string { return "id" }

// Project_Name_Field wraps a value destined for the name column of the
// projects table.
type Project_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Project_Name constructs a set, non-null field holding v.
func Project_Name(v string) Project_Name_Field {
	var f Project_Name_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_Name_Field) _Column() string { return "name" }

// Project_Description_Field wraps a value destined for the description
// column of the projects table.
type Project_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Project_Description constructs a set, non-null field holding v.
func Project_Description(v string) Project_Description_Field {
	var f Project_Description_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_Description_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_Description_Field) _Column() string { return "description" }

// Project_UsageLimit_Field wraps a value destined for the usage_limit
// column of the projects table.
type Project_UsageLimit_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Project_UsageLimit constructs a set, non-null field holding v.
func Project_UsageLimit(v int64) Project_UsageLimit_Field {
	var f Project_UsageLimit_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_UsageLimit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_UsageLimit_Field) _Column() string { return "usage_limit" }

// Project_PartnerId_Field wraps a nullable value destined for the
// partner_id column of the projects table.
type Project_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Project_PartnerId constructs a set, non-null field holding v.
func Project_PartnerId(v []byte) Project_PartnerId_Field {
	var f Project_PartnerId_Field
	f._set = true
	f._value = v
	return f
}

// Project_PartnerId_Raw converts a possibly-nil slice into a field:
// nil maps to SQL NULL, anything else to a regular value.
func Project_PartnerId_Raw(v []byte) Project_PartnerId_Field {
	if v != nil {
		return Project_PartnerId(v)
	}
	return Project_PartnerId_Null()
}

// Project_PartnerId_Null produces a field representing SQL NULL.
func Project_PartnerId_Null() Project_PartnerId_Field {
	var f Project_PartnerId_Field
	f._set = true
	f._null = true
	return f
}

// isnull reports whether the field would be stored as SQL NULL.
func (f Project_PartnerId_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_PartnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_PartnerId_Field) _Column() string { return "partner_id" }

// Project_OwnerId_Field wraps a value destined for the owner_id column
// of the projects table.
type Project_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Project_OwnerId constructs a set, non-null field holding v.
func Project_OwnerId(v []byte) Project_OwnerId_Field {
	var f Project_OwnerId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_OwnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_OwnerId_Field) _Column() string { return "owner_id" }

// Project_CreatedAt_Field wraps a value destined for the created_at
// column of the projects table.
type Project_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Project_CreatedAt constructs a set, non-null field holding v.
func Project_CreatedAt(v time.Time) Project_CreatedAt_Field {
	var f Project_CreatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f Project_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Project_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// RegistrationToken mirrors a row of the "registration_tokens" table.
type RegistrationToken struct {
	Secret       []byte    // secret column
	OwnerId      []byte    // owner_id column (nullable)
	ProjectLimit int       // project_limit column
	CreatedAt    time.Time // created_at column
}

// _Table reports the database table backing RegistrationToken.
func (RegistrationToken) _Table() string { return "registration_tokens" }

// RegistrationToken_Create_Fields lists the optional (nullable) columns
// that may be supplied when inserting a RegistrationToken row.
type RegistrationToken_Create_Fields struct {
	OwnerId RegistrationToken_OwnerId_Field
}

// RegistrationToken_Update_Fields lists the columns the generated API
// allows an update statement to change.
type RegistrationToken_Update_Fields struct {
	OwnerId RegistrationToken_OwnerId_Field
}
|
|
|
|
// RegistrationToken_Secret_Field wraps a value destined for the secret
// column of the registration_tokens table.
type RegistrationToken_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// RegistrationToken_Secret constructs a set, non-null field holding v.
func RegistrationToken_Secret(v []byte) RegistrationToken_Secret_Field {
	var f RegistrationToken_Secret_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f RegistrationToken_Secret_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (RegistrationToken_Secret_Field) _Column() string { return "secret" }

// RegistrationToken_OwnerId_Field wraps a nullable value destined for
// the owner_id column of the registration_tokens table.
type RegistrationToken_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// RegistrationToken_OwnerId constructs a set, non-null field holding v.
func RegistrationToken_OwnerId(v []byte) RegistrationToken_OwnerId_Field {
	var f RegistrationToken_OwnerId_Field
	f._set = true
	f._value = v
	return f
}

// RegistrationToken_OwnerId_Raw converts a possibly-nil slice into a
// field: nil maps to SQL NULL, anything else to a regular value.
func RegistrationToken_OwnerId_Raw(v []byte) RegistrationToken_OwnerId_Field {
	if v != nil {
		return RegistrationToken_OwnerId(v)
	}
	return RegistrationToken_OwnerId_Null()
}

// RegistrationToken_OwnerId_Null produces a field representing SQL NULL.
func RegistrationToken_OwnerId_Null() RegistrationToken_OwnerId_Field {
	var f RegistrationToken_OwnerId_Field
	f._set = true
	f._null = true
	return f
}

// isnull reports whether the field would be stored as SQL NULL.
func (f RegistrationToken_OwnerId_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f RegistrationToken_OwnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (RegistrationToken_OwnerId_Field) _Column() string { return "owner_id" }

// RegistrationToken_ProjectLimit_Field wraps a value destined for the
// project_limit column of the registration_tokens table.
type RegistrationToken_ProjectLimit_Field struct {
	_set   bool
	_null  bool
	_value int
}

// RegistrationToken_ProjectLimit constructs a set, non-null field holding v.
func RegistrationToken_ProjectLimit(v int) RegistrationToken_ProjectLimit_Field {
	var f RegistrationToken_ProjectLimit_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f RegistrationToken_ProjectLimit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (RegistrationToken_ProjectLimit_Field) _Column() string { return "project_limit" }

// RegistrationToken_CreatedAt_Field wraps a value destined for the
// created_at column of the registration_tokens table.
type RegistrationToken_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// RegistrationToken_CreatedAt constructs a set, non-null field holding v.
func RegistrationToken_CreatedAt(v time.Time) RegistrationToken_CreatedAt_Field {
	var f RegistrationToken_CreatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f RegistrationToken_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (RegistrationToken_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// ResetPasswordToken mirrors a row of the "reset_password_tokens" table.
type ResetPasswordToken struct {
	Secret    []byte    // secret column
	OwnerId   []byte    // owner_id column
	CreatedAt time.Time // created_at column
}

// _Table reports the database table backing ResetPasswordToken.
func (ResetPasswordToken) _Table() string { return "reset_password_tokens" }

// ResetPasswordToken_Update_Fields lists the columns the generated API
// allows an update statement to change.
type ResetPasswordToken_Update_Fields struct {
	OwnerId ResetPasswordToken_OwnerId_Field
}
|
|
|
|
// ResetPasswordToken_Secret_Field wraps a value destined for the secret
// column of the reset_password_tokens table.
type ResetPasswordToken_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ResetPasswordToken_Secret constructs a set, non-null field holding v.
func ResetPasswordToken_Secret(v []byte) ResetPasswordToken_Secret_Field {
	var f ResetPasswordToken_Secret_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f ResetPasswordToken_Secret_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (ResetPasswordToken_Secret_Field) _Column() string { return "secret" }

// ResetPasswordToken_OwnerId_Field wraps a value destined for the
// owner_id column of the reset_password_tokens table.
type ResetPasswordToken_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ResetPasswordToken_OwnerId constructs a set, non-null field holding v.
func ResetPasswordToken_OwnerId(v []byte) ResetPasswordToken_OwnerId_Field {
	var f ResetPasswordToken_OwnerId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f ResetPasswordToken_OwnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (ResetPasswordToken_OwnerId_Field) _Column() string { return "owner_id" }

// ResetPasswordToken_CreatedAt_Field wraps a value destined for the
// created_at column of the reset_password_tokens table.
type ResetPasswordToken_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ResetPasswordToken_CreatedAt constructs a set, non-null field holding v.
func ResetPasswordToken_CreatedAt(v time.Time) ResetPasswordToken_CreatedAt_Field {
	var f ResetPasswordToken_CreatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f ResetPasswordToken_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (ResetPasswordToken_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// SerialNumber mirrors a row of the "serial_numbers" table.
type SerialNumber struct {
	Id           int       // id column
	SerialNumber []byte    // serial_number column
	BucketId     []byte    // bucket_id column
	ExpiresAt    time.Time // expires_at column
}

// _Table reports the database table backing SerialNumber.
func (SerialNumber) _Table() string { return "serial_numbers" }

// SerialNumber_Update_Fields is empty: the generated API exposes no
// updatable columns for this table.
type SerialNumber_Update_Fields struct {
}
|
|
|
|
// SerialNumber_Id_Field wraps a value destined for the id column of the
// serial_numbers table.
type SerialNumber_Id_Field struct {
	_set   bool
	_null  bool
	_value int
}

// SerialNumber_Id constructs a set, non-null field holding v.
func SerialNumber_Id(v int) SerialNumber_Id_Field {
	var f SerialNumber_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f SerialNumber_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (SerialNumber_Id_Field) _Column() string { return "id" }

// SerialNumber_SerialNumber_Field wraps a value destined for the
// serial_number column of the serial_numbers table.
type SerialNumber_SerialNumber_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// SerialNumber_SerialNumber constructs a set, non-null field holding v.
func SerialNumber_SerialNumber(v []byte) SerialNumber_SerialNumber_Field {
	var f SerialNumber_SerialNumber_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f SerialNumber_SerialNumber_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (SerialNumber_SerialNumber_Field) _Column() string { return "serial_number" }

// SerialNumber_BucketId_Field wraps a value destined for the bucket_id
// column of the serial_numbers table.
type SerialNumber_BucketId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// SerialNumber_BucketId constructs a set, non-null field holding v.
func SerialNumber_BucketId(v []byte) SerialNumber_BucketId_Field {
	var f SerialNumber_BucketId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the SQL argument for the field, or nil when unset/null.
func (f SerialNumber_BucketId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (SerialNumber_BucketId_Field) _Column() string { return "bucket_id" }
|
|
|
|
type SerialNumber_ExpiresAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func SerialNumber_ExpiresAt(v time.Time) SerialNumber_ExpiresAt_Field {
|
|
v = toUTC(v)
|
|
return SerialNumber_ExpiresAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f SerialNumber_ExpiresAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (SerialNumber_ExpiresAt_Field) _Column() string { return "expires_at" }
|
|
|
|
// StoragenodeBandwidthRollup mirrors a row of the
// "storagenode_bandwidth_rollups" table: aggregated bandwidth usage for
// one storage node over one interval.
type StoragenodeBandwidthRollup struct {
	StoragenodeId   []byte    // storagenode_id column
	IntervalStart   time.Time // interval_start column
	IntervalSeconds uint      // interval_seconds column
	Action          uint      // action column
	Allocated       uint64    // allocated column
	Settled         uint64    // settled column
}

// _Table reports the database table backing StoragenodeBandwidthRollup.
func (StoragenodeBandwidthRollup) _Table() string { return "storagenode_bandwidth_rollups" }

// StoragenodeBandwidthRollup_Update_Fields lists the columns the
// generated API allows an update statement to change.
type StoragenodeBandwidthRollup_Update_Fields struct {
	Allocated StoragenodeBandwidthRollup_Allocated_Field
	Settled   StoragenodeBandwidthRollup_Settled_Field
}
|
|
|
|
type StoragenodeBandwidthRollup_StoragenodeId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_StoragenodeId(v []byte) StoragenodeBandwidthRollup_StoragenodeId_Field {
|
|
return StoragenodeBandwidthRollup_StoragenodeId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_StoragenodeId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_StoragenodeId_Field) _Column() string { return "storagenode_id" }
|
|
|
|
type StoragenodeBandwidthRollup_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_IntervalStart(v time.Time) StoragenodeBandwidthRollup_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return StoragenodeBandwidthRollup_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
type StoragenodeBandwidthRollup_IntervalSeconds_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_IntervalSeconds(v uint) StoragenodeBandwidthRollup_IntervalSeconds_Field {
|
|
return StoragenodeBandwidthRollup_IntervalSeconds_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_IntervalSeconds_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }
|
|
|
|
type StoragenodeBandwidthRollup_Action_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_Action(v uint) StoragenodeBandwidthRollup_Action_Field {
|
|
return StoragenodeBandwidthRollup_Action_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_Action_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_Action_Field) _Column() string { return "action" }
|
|
|
|
type StoragenodeBandwidthRollup_Allocated_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_Allocated(v uint64) StoragenodeBandwidthRollup_Allocated_Field {
|
|
return StoragenodeBandwidthRollup_Allocated_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_Allocated_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_Allocated_Field) _Column() string { return "allocated" }
|
|
|
|
type StoragenodeBandwidthRollup_Settled_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_Settled(v uint64) StoragenodeBandwidthRollup_Settled_Field {
|
|
return StoragenodeBandwidthRollup_Settled_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_Settled_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_Settled_Field) _Column() string { return "settled" }
|
|
|
|
// StoragenodeStorageTally mirrors one row of the
// storagenode_storage_tallies table.
type StoragenodeStorageTally struct {
	Id              int64
	NodeId          []byte
	IntervalEndTime time.Time
	DataTotal       float64
}

// _Table reports the database table this struct maps to.
func (StoragenodeStorageTally) _Table() string { return "storagenode_storage_tallies" }

// StoragenodeStorageTally_Update_Fields is empty: the schema declares no
// updatable columns for this table.
type StoragenodeStorageTally_Update_Fields struct {
}

// StoragenodeStorageTally_Id_Field wraps the id column.
type StoragenodeStorageTally_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func StoragenodeStorageTally_Id(v int64) StoragenodeStorageTally_Id_Field {

	return StoragenodeStorageTally_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
func (f StoragenodeStorageTally_Id_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StoragenodeStorageTally_Id_Field) _Column() string { return "id" }

// StoragenodeStorageTally_NodeId_Field wraps the node_id column.
type StoragenodeStorageTally_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StoragenodeStorageTally_NodeId(v []byte) StoragenodeStorageTally_NodeId_Field {

	return StoragenodeStorageTally_NodeId_Field{_set: true, _value: v}
}

func (f StoragenodeStorageTally_NodeId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StoragenodeStorageTally_NodeId_Field) _Column() string { return "node_id" }

// StoragenodeStorageTally_IntervalEndTime_Field wraps the interval_end_time
// column. NOTE(review): unlike some other timestamp constructors in this
// file, this one does not call toUTC — presumably a different dbx timestamp
// kind; generator behavior, left as-is.
type StoragenodeStorageTally_IntervalEndTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StoragenodeStorageTally_IntervalEndTime(v time.Time) StoragenodeStorageTally_IntervalEndTime_Field {

	return StoragenodeStorageTally_IntervalEndTime_Field{_set: true, _value: v}
}

func (f StoragenodeStorageTally_IntervalEndTime_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StoragenodeStorageTally_IntervalEndTime_Field) _Column() string { return "interval_end_time" }

// StoragenodeStorageTally_DataTotal_Field wraps the data_total column.
type StoragenodeStorageTally_DataTotal_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func StoragenodeStorageTally_DataTotal(v float64) StoragenodeStorageTally_DataTotal_Field {

	return StoragenodeStorageTally_DataTotal_Field{_set: true, _value: v}
}

func (f StoragenodeStorageTally_DataTotal_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StoragenodeStorageTally_DataTotal_Field) _Column() string { return "data_total" }
|
|
|
|
// StripeCustomer mirrors one row of the stripe_customers table, linking a
// user id to its Stripe customer id.
type StripeCustomer struct {
	UserId     []byte
	CustomerId string
	CreatedAt  time.Time
}

// _Table reports the database table this struct maps to.
func (StripeCustomer) _Table() string { return "stripe_customers" }

// StripeCustomer_Update_Fields is empty: the schema declares no updatable
// columns for this table.
type StripeCustomer_Update_Fields struct {
}

// StripeCustomer_UserId_Field wraps the user_id column.
type StripeCustomer_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripeCustomer_UserId(v []byte) StripeCustomer_UserId_Field {

	return StripeCustomer_UserId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
func (f StripeCustomer_UserId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripeCustomer_UserId_Field) _Column() string { return "user_id" }

// StripeCustomer_CustomerId_Field wraps the customer_id column.
type StripeCustomer_CustomerId_Field struct {
	_set   bool
	_null  bool
	_value string
}

func StripeCustomer_CustomerId(v string) StripeCustomer_CustomerId_Field {

	return StripeCustomer_CustomerId_Field{_set: true, _value: v}
}

func (f StripeCustomer_CustomerId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripeCustomer_CustomerId_Field) _Column() string { return "customer_id" }

// StripeCustomer_CreatedAt_Field wraps the created_at column.
type StripeCustomer_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripeCustomer_CreatedAt(v time.Time) StripeCustomer_CreatedAt_Field {

	return StripeCustomer_CreatedAt_Field{_set: true, _value: v}
}

func (f StripeCustomer_CreatedAt_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripeCustomer_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// StripecoinpaymentsInvoiceProjectRecord mirrors one row of the
// stripecoinpayments_invoice_project_records table: per-project usage
// totals for a billing period, plus a processing state.
type StripecoinpaymentsInvoiceProjectRecord struct {
	Id          []byte
	ProjectId   []byte
	Storage     float64
	Egress      int64
	Objects     int64
	PeriodStart time.Time
	PeriodEnd   time.Time
	State       int
	CreatedAt   time.Time
}

// _Table reports the database table this struct maps to.
func (StripecoinpaymentsInvoiceProjectRecord) _Table() string {

	return "stripecoinpayments_invoice_project_records"
}

// StripecoinpaymentsInvoiceProjectRecord_Update_Fields lists the columns an
// UPDATE may change; only State is updatable here.
type StripecoinpaymentsInvoiceProjectRecord_Update_Fields struct {
	State StripecoinpaymentsInvoiceProjectRecord_State_Field
}

// StripecoinpaymentsInvoiceProjectRecord_Id_Field wraps the id column.
type StripecoinpaymentsInvoiceProjectRecord_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripecoinpaymentsInvoiceProjectRecord_Id(v []byte) StripecoinpaymentsInvoiceProjectRecord_Id_Field {

	return StripecoinpaymentsInvoiceProjectRecord_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
// (The same pattern repeats for every *_Field type below.)
func (f StripecoinpaymentsInvoiceProjectRecord_Id_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_Id_Field) _Column() string { return "id" }

// StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field wraps the project_id column.
type StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripecoinpaymentsInvoiceProjectRecord_ProjectId(v []byte) StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field {

	return StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field) _Column() string { return "project_id" }

// StripecoinpaymentsInvoiceProjectRecord_Storage_Field wraps the storage column.
type StripecoinpaymentsInvoiceProjectRecord_Storage_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func StripecoinpaymentsInvoiceProjectRecord_Storage(v float64) StripecoinpaymentsInvoiceProjectRecord_Storage_Field {

	return StripecoinpaymentsInvoiceProjectRecord_Storage_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Storage_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_Storage_Field) _Column() string { return "storage" }

// StripecoinpaymentsInvoiceProjectRecord_Egress_Field wraps the egress column.
type StripecoinpaymentsInvoiceProjectRecord_Egress_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func StripecoinpaymentsInvoiceProjectRecord_Egress(v int64) StripecoinpaymentsInvoiceProjectRecord_Egress_Field {

	return StripecoinpaymentsInvoiceProjectRecord_Egress_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Egress_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_Egress_Field) _Column() string { return "egress" }

// StripecoinpaymentsInvoiceProjectRecord_Objects_Field wraps the objects column.
type StripecoinpaymentsInvoiceProjectRecord_Objects_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func StripecoinpaymentsInvoiceProjectRecord_Objects(v int64) StripecoinpaymentsInvoiceProjectRecord_Objects_Field {

	return StripecoinpaymentsInvoiceProjectRecord_Objects_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Objects_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_Objects_Field) _Column() string { return "objects" }

// StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field wraps the period_start column.
type StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsInvoiceProjectRecord_PeriodStart(v time.Time) StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field {

	return StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field) _Column() string {

	return "period_start"
}

// StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field wraps the period_end column.
type StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsInvoiceProjectRecord_PeriodEnd(v time.Time) StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field {

	return StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) _Column() string { return "period_end" }

// StripecoinpaymentsInvoiceProjectRecord_State_Field wraps the state column.
type StripecoinpaymentsInvoiceProjectRecord_State_Field struct {
	_set   bool
	_null  bool
	_value int
}

func StripecoinpaymentsInvoiceProjectRecord_State(v int) StripecoinpaymentsInvoiceProjectRecord_State_Field {

	return StripecoinpaymentsInvoiceProjectRecord_State_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_State_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_State_Field) _Column() string { return "state" }

// StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field wraps the created_at column.
type StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsInvoiceProjectRecord_CreatedAt(v time.Time) StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field {

	return StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// StripecoinpaymentsTxConversionRate mirrors one row of the
// stripecoinpayments_tx_conversion_rates table.
type StripecoinpaymentsTxConversionRate struct {
	TxId      string
	Rate      []byte
	CreatedAt time.Time
}

// _Table reports the database table this struct maps to.
func (StripecoinpaymentsTxConversionRate) _Table() string {

	return "stripecoinpayments_tx_conversion_rates"
}

// StripecoinpaymentsTxConversionRate_Update_Fields is empty: the schema
// declares no updatable columns for this table.
type StripecoinpaymentsTxConversionRate_Update_Fields struct {
}

// StripecoinpaymentsTxConversionRate_TxId_Field wraps the tx_id column.
type StripecoinpaymentsTxConversionRate_TxId_Field struct {
	_set   bool
	_null  bool
	_value string
}

func StripecoinpaymentsTxConversionRate_TxId(v string) StripecoinpaymentsTxConversionRate_TxId_Field {

	return StripecoinpaymentsTxConversionRate_TxId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
func (f StripecoinpaymentsTxConversionRate_TxId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsTxConversionRate_TxId_Field) _Column() string { return "tx_id" }

// StripecoinpaymentsTxConversionRate_Rate_Field wraps the rate column.
type StripecoinpaymentsTxConversionRate_Rate_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripecoinpaymentsTxConversionRate_Rate(v []byte) StripecoinpaymentsTxConversionRate_Rate_Field {

	return StripecoinpaymentsTxConversionRate_Rate_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsTxConversionRate_Rate_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsTxConversionRate_Rate_Field) _Column() string { return "rate" }

// StripecoinpaymentsTxConversionRate_CreatedAt_Field wraps the created_at column.
type StripecoinpaymentsTxConversionRate_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsTxConversionRate_CreatedAt(v time.Time) StripecoinpaymentsTxConversionRate_CreatedAt_Field {

	return StripecoinpaymentsTxConversionRate_CreatedAt_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsTxConversionRate_CreatedAt_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (StripecoinpaymentsTxConversionRate_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// User mirrors one row of the users table. ShortName and PartnerId are
// nullable columns (pointer / nil-able slice with dedicated *_Null and
// *_Raw constructors below).
type User struct {
	Id              []byte
	Email           string
	NormalizedEmail string
	FullName        string
	ShortName       *string
	PasswordHash    []byte
	Status          int
	PartnerId       []byte
	CreatedAt       time.Time
}

// _Table reports the database table this struct maps to.
func (User) _Table() string { return "users" }

// User_Create_Fields holds the optional (nullable/defaulted) columns that
// may be supplied on INSERT.
type User_Create_Fields struct {
	ShortName User_ShortName_Field
	PartnerId User_PartnerId_Field
}

// User_Update_Fields lists the columns an UPDATE may change; unset fields
// are left untouched by the generated query.
type User_Update_Fields struct {
	Email           User_Email_Field
	NormalizedEmail User_NormalizedEmail_Field
	FullName        User_FullName_Field
	ShortName       User_ShortName_Field
	PasswordHash    User_PasswordHash_Field
	Status          User_Status_Field
}

// User_Id_Field wraps the id column.
type User_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func User_Id(v []byte) User_Id_Field {

	return User_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
// (The same pattern repeats for every *_Field type below.)
func (f User_Id_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_Id_Field) _Column() string { return "id" }

// User_Email_Field wraps the email column.
type User_Email_Field struct {
	_set   bool
	_null  bool
	_value string
}

func User_Email(v string) User_Email_Field {

	return User_Email_Field{_set: true, _value: v}
}

func (f User_Email_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_Email_Field) _Column() string { return "email" }

// User_NormalizedEmail_Field wraps the normalized_email column.
type User_NormalizedEmail_Field struct {
	_set   bool
	_null  bool
	_value string
}

func User_NormalizedEmail(v string) User_NormalizedEmail_Field {

	return User_NormalizedEmail_Field{_set: true, _value: v}
}

func (f User_NormalizedEmail_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_NormalizedEmail_Field) _Column() string { return "normalized_email" }

// User_FullName_Field wraps the full_name column.
type User_FullName_Field struct {
	_set   bool
	_null  bool
	_value string
}

func User_FullName(v string) User_FullName_Field {

	return User_FullName_Field{_set: true, _value: v}
}

func (f User_FullName_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_FullName_Field) _Column() string { return "full_name" }

// User_ShortName_Field wraps the nullable short_name column; _value is a
// pointer so NULL can be represented.
type User_ShortName_Field struct {
	_set   bool
	_null  bool
	_value *string
}

func User_ShortName(v string) User_ShortName_Field {

	return User_ShortName_Field{_set: true, _value: &v}
}

// User_ShortName_Raw converts a possibly-nil pointer into a field, mapping
// nil to an explicit NULL.
func User_ShortName_Raw(v *string) User_ShortName_Field {

	if v == nil {
		return User_ShortName_Null()
	}
	return User_ShortName(*v)
}

// User_ShortName_Null constructs a field that is explicitly SQL NULL.
func User_ShortName_Null() User_ShortName_Field {

	return User_ShortName_Field{_set: true, _null: true}
}

// isnull reports whether the field would be stored as SQL NULL.
func (f User_ShortName_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f User_ShortName_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_ShortName_Field) _Column() string { return "short_name" }

// User_PasswordHash_Field wraps the password_hash column.
type User_PasswordHash_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func User_PasswordHash(v []byte) User_PasswordHash_Field {

	return User_PasswordHash_Field{_set: true, _value: v}
}

func (f User_PasswordHash_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_PasswordHash_Field) _Column() string { return "password_hash" }

// User_Status_Field wraps the status column.
type User_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

func User_Status(v int) User_Status_Field {

	return User_Status_Field{_set: true, _value: v}
}

func (f User_Status_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_Status_Field) _Column() string { return "status" }

// User_PartnerId_Field wraps the nullable partner_id column.
type User_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func User_PartnerId(v []byte) User_PartnerId_Field {

	return User_PartnerId_Field{_set: true, _value: v}
}

// User_PartnerId_Raw maps a nil slice to an explicit NULL.
func User_PartnerId_Raw(v []byte) User_PartnerId_Field {

	if v == nil {
		return User_PartnerId_Null()
	}
	return User_PartnerId(v)
}

// User_PartnerId_Null constructs a field that is explicitly SQL NULL.
func User_PartnerId_Null() User_PartnerId_Field {

	return User_PartnerId_Field{_set: true, _null: true}
}

// isnull reports whether the field would be stored as SQL NULL.
func (f User_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f User_PartnerId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_PartnerId_Field) _Column() string { return "partner_id" }

// User_CreatedAt_Field wraps the created_at column.
type User_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func User_CreatedAt(v time.Time) User_CreatedAt_Field {

	return User_CreatedAt_Field{_set: true, _value: v}
}

func (f User_CreatedAt_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (User_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// ValueAttribution mirrors one row of the value_attributions table.
type ValueAttribution struct {
	ProjectId   []byte
	BucketName  []byte
	PartnerId   []byte
	LastUpdated time.Time
}

// _Table reports the database table this struct maps to.
func (ValueAttribution) _Table() string { return "value_attributions" }

// ValueAttribution_Update_Fields is empty: the schema declares no updatable
// columns for this table.
type ValueAttribution_Update_Fields struct {
}

// ValueAttribution_ProjectId_Field wraps the project_id column.
type ValueAttribution_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ValueAttribution_ProjectId(v []byte) ValueAttribution_ProjectId_Field {

	return ValueAttribution_ProjectId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
func (f ValueAttribution_ProjectId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ValueAttribution_ProjectId_Field) _Column() string { return "project_id" }

// ValueAttribution_BucketName_Field wraps the bucket_name column.
type ValueAttribution_BucketName_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ValueAttribution_BucketName(v []byte) ValueAttribution_BucketName_Field {

	return ValueAttribution_BucketName_Field{_set: true, _value: v}
}

func (f ValueAttribution_BucketName_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ValueAttribution_BucketName_Field) _Column() string { return "bucket_name" }

// ValueAttribution_PartnerId_Field wraps the partner_id column.
type ValueAttribution_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ValueAttribution_PartnerId(v []byte) ValueAttribution_PartnerId_Field {

	return ValueAttribution_PartnerId_Field{_set: true, _value: v}
}

func (f ValueAttribution_PartnerId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ValueAttribution_PartnerId_Field) _Column() string { return "partner_id" }

// ValueAttribution_LastUpdated_Field wraps the last_updated column.
type ValueAttribution_LastUpdated_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ValueAttribution_LastUpdated normalizes v to UTC before storing.
func ValueAttribution_LastUpdated(v time.Time) ValueAttribution_LastUpdated_Field {

	v = toUTC(v)
	return ValueAttribution_LastUpdated_Field{_set: true, _value: v}
}

func (f ValueAttribution_LastUpdated_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ValueAttribution_LastUpdated_Field) _Column() string { return "last_updated" }
|
|
|
|
// ApiKey mirrors one row of the api_keys table. PartnerId is a nullable
// column (see the *_Null/*_Raw constructors below).
type ApiKey struct {
	Id        []byte
	ProjectId []byte
	Head      []byte
	Name      string
	Secret    []byte
	PartnerId []byte
	CreatedAt time.Time
}

// _Table reports the database table this struct maps to.
func (ApiKey) _Table() string { return "api_keys" }

// ApiKey_Create_Fields holds the optional columns that may be supplied on
// INSERT.
type ApiKey_Create_Fields struct {
	PartnerId ApiKey_PartnerId_Field
}

// ApiKey_Update_Fields lists the columns an UPDATE may change; only Name is
// updatable here.
type ApiKey_Update_Fields struct {
	Name ApiKey_Name_Field
}

// ApiKey_Id_Field wraps the id column.
type ApiKey_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_Id(v []byte) ApiKey_Id_Field {

	return ApiKey_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/NULL.
// (The same pattern repeats for every *_Field type below.)
func (f ApiKey_Id_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_Id_Field) _Column() string { return "id" }

// ApiKey_ProjectId_Field wraps the project_id column.
type ApiKey_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_ProjectId(v []byte) ApiKey_ProjectId_Field {

	return ApiKey_ProjectId_Field{_set: true, _value: v}
}

func (f ApiKey_ProjectId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_ProjectId_Field) _Column() string { return "project_id" }

// ApiKey_Head_Field wraps the head column.
type ApiKey_Head_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_Head(v []byte) ApiKey_Head_Field {

	return ApiKey_Head_Field{_set: true, _value: v}
}

func (f ApiKey_Head_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_Head_Field) _Column() string { return "head" }

// ApiKey_Name_Field wraps the name column.
type ApiKey_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

func ApiKey_Name(v string) ApiKey_Name_Field {

	return ApiKey_Name_Field{_set: true, _value: v}
}

func (f ApiKey_Name_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_Name_Field) _Column() string { return "name" }

// ApiKey_Secret_Field wraps the secret column.
type ApiKey_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_Secret(v []byte) ApiKey_Secret_Field {

	return ApiKey_Secret_Field{_set: true, _value: v}
}

func (f ApiKey_Secret_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_Secret_Field) _Column() string { return "secret" }

// ApiKey_PartnerId_Field wraps the nullable partner_id column.
type ApiKey_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_PartnerId(v []byte) ApiKey_PartnerId_Field {

	return ApiKey_PartnerId_Field{_set: true, _value: v}
}

// ApiKey_PartnerId_Raw maps a nil slice to an explicit NULL.
func ApiKey_PartnerId_Raw(v []byte) ApiKey_PartnerId_Field {

	if v == nil {
		return ApiKey_PartnerId_Null()
	}
	return ApiKey_PartnerId(v)
}

// ApiKey_PartnerId_Null constructs a field that is explicitly SQL NULL.
func ApiKey_PartnerId_Null() ApiKey_PartnerId_Field {

	return ApiKey_PartnerId_Field{_set: true, _null: true}
}

// isnull reports whether the field would be stored as SQL NULL.
func (f ApiKey_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f ApiKey_PartnerId_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_PartnerId_Field) _Column() string { return "partner_id" }

// ApiKey_CreatedAt_Field wraps the created_at column.
type ApiKey_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func ApiKey_CreatedAt(v time.Time) ApiKey_CreatedAt_Field {

	return ApiKey_CreatedAt_Field{_set: true, _value: v}
}

func (f ApiKey_CreatedAt_Field) value() interface{} {

	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (ApiKey_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type BucketMetainfo struct {
|
|
Id []byte
|
|
ProjectId []byte
|
|
Name []byte
|
|
PartnerId []byte
|
|
PathCipher int
|
|
CreatedAt time.Time
|
|
DefaultSegmentSize int
|
|
DefaultEncryptionCipherSuite int
|
|
DefaultEncryptionBlockSize int
|
|
DefaultRedundancyAlgorithm int
|
|
DefaultRedundancyShareSize int
|
|
DefaultRedundancyRequiredShares int
|
|
DefaultRedundancyRepairShares int
|
|
DefaultRedundancyOptimalShares int
|
|
DefaultRedundancyTotalShares int
|
|
}
|
|
|
|
func (BucketMetainfo) _Table() string { return "bucket_metainfos" }
|
|
|
|
type BucketMetainfo_Create_Fields struct {
|
|
PartnerId BucketMetainfo_PartnerId_Field
|
|
}
|
|
|
|
type BucketMetainfo_Update_Fields struct {
|
|
PartnerId BucketMetainfo_PartnerId_Field
|
|
DefaultSegmentSize BucketMetainfo_DefaultSegmentSize_Field
|
|
DefaultEncryptionCipherSuite BucketMetainfo_DefaultEncryptionCipherSuite_Field
|
|
DefaultEncryptionBlockSize BucketMetainfo_DefaultEncryptionBlockSize_Field
|
|
DefaultRedundancyAlgorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field
|
|
DefaultRedundancyShareSize BucketMetainfo_DefaultRedundancyShareSize_Field
|
|
DefaultRedundancyRequiredShares BucketMetainfo_DefaultRedundancyRequiredShares_Field
|
|
DefaultRedundancyRepairShares BucketMetainfo_DefaultRedundancyRepairShares_Field
|
|
DefaultRedundancyOptimalShares BucketMetainfo_DefaultRedundancyOptimalShares_Field
|
|
DefaultRedundancyTotalShares BucketMetainfo_DefaultRedundancyTotalShares_Field
|
|
}
|
|
|
|
type BucketMetainfo_Id_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_Id(v []byte) BucketMetainfo_Id_Field {
|
|
return BucketMetainfo_Id_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_Id_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_Id_Field) _Column() string { return "id" }
|
|
|
|
type BucketMetainfo_ProjectId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_ProjectId(v []byte) BucketMetainfo_ProjectId_Field {
|
|
return BucketMetainfo_ProjectId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_ProjectId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_ProjectId_Field) _Column() string { return "project_id" }
|
|
|
|
type BucketMetainfo_Name_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_Name(v []byte) BucketMetainfo_Name_Field {
|
|
return BucketMetainfo_Name_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_Name_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_Name_Field) _Column() string { return "name" }
|
|
|
|
type BucketMetainfo_PartnerId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_PartnerId(v []byte) BucketMetainfo_PartnerId_Field {
|
|
return BucketMetainfo_PartnerId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func BucketMetainfo_PartnerId_Raw(v []byte) BucketMetainfo_PartnerId_Field {
|
|
if v == nil {
|
|
return BucketMetainfo_PartnerId_Null()
|
|
}
|
|
return BucketMetainfo_PartnerId(v)
|
|
}
|
|
|
|
func BucketMetainfo_PartnerId_Null() BucketMetainfo_PartnerId_Field {
|
|
return BucketMetainfo_PartnerId_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f BucketMetainfo_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f BucketMetainfo_PartnerId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_PartnerId_Field) _Column() string { return "partner_id" }
|
|
|
|
type BucketMetainfo_PathCipher_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_PathCipher(v int) BucketMetainfo_PathCipher_Field {
|
|
return BucketMetainfo_PathCipher_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_PathCipher_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_PathCipher_Field) _Column() string { return "path_cipher" }
|
|
|
|
// BucketMetainfo_CreatedAt_Field wraps the "created_at" column.
type BucketMetainfo_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// BucketMetainfo_CreatedAt returns a set, non-null field holding v.
func BucketMetainfo_CreatedAt(v time.Time) BucketMetainfo_CreatedAt_Field {
	f := BucketMetainfo_CreatedAt_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_CreatedAt_Field) _Column() string {
	return "created_at"
}
|
|
|
|
// BucketMetainfo_DefaultSegmentSize_Field wraps the "default_segment_size"
// column.
type BucketMetainfo_DefaultSegmentSize_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultSegmentSize returns a set, non-null field holding v.
func BucketMetainfo_DefaultSegmentSize(v int) BucketMetainfo_DefaultSegmentSize_Field {
	f := BucketMetainfo_DefaultSegmentSize_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultSegmentSize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultSegmentSize_Field) _Column() string {
	return "default_segment_size"
}

// BucketMetainfo_DefaultEncryptionCipherSuite_Field wraps the
// "default_encryption_cipher_suite" column.
type BucketMetainfo_DefaultEncryptionCipherSuite_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultEncryptionCipherSuite returns a set, non-null field
// holding v.
func BucketMetainfo_DefaultEncryptionCipherSuite(v int) BucketMetainfo_DefaultEncryptionCipherSuite_Field {
	f := BucketMetainfo_DefaultEncryptionCipherSuite_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultEncryptionCipherSuite_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultEncryptionCipherSuite_Field) _Column() string {
	return "default_encryption_cipher_suite"
}

// BucketMetainfo_DefaultEncryptionBlockSize_Field wraps the
// "default_encryption_block_size" column.
type BucketMetainfo_DefaultEncryptionBlockSize_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultEncryptionBlockSize returns a set, non-null field
// holding v.
func BucketMetainfo_DefaultEncryptionBlockSize(v int) BucketMetainfo_DefaultEncryptionBlockSize_Field {
	f := BucketMetainfo_DefaultEncryptionBlockSize_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultEncryptionBlockSize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultEncryptionBlockSize_Field) _Column() string {
	return "default_encryption_block_size"
}

// BucketMetainfo_DefaultRedundancyAlgorithm_Field wraps the
// "default_redundancy_algorithm" column.
type BucketMetainfo_DefaultRedundancyAlgorithm_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultRedundancyAlgorithm returns a set, non-null field
// holding v.
func BucketMetainfo_DefaultRedundancyAlgorithm(v int) BucketMetainfo_DefaultRedundancyAlgorithm_Field {
	f := BucketMetainfo_DefaultRedundancyAlgorithm_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultRedundancyAlgorithm_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultRedundancyAlgorithm_Field) _Column() string {
	return "default_redundancy_algorithm"
}

// BucketMetainfo_DefaultRedundancyShareSize_Field wraps the
// "default_redundancy_share_size" column.
type BucketMetainfo_DefaultRedundancyShareSize_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultRedundancyShareSize returns a set, non-null field
// holding v.
func BucketMetainfo_DefaultRedundancyShareSize(v int) BucketMetainfo_DefaultRedundancyShareSize_Field {
	f := BucketMetainfo_DefaultRedundancyShareSize_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultRedundancyShareSize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultRedundancyShareSize_Field) _Column() string {
	return "default_redundancy_share_size"
}

// BucketMetainfo_DefaultRedundancyRequiredShares_Field wraps the
// "default_redundancy_required_shares" column.
type BucketMetainfo_DefaultRedundancyRequiredShares_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultRedundancyRequiredShares returns a set, non-null
// field holding v.
func BucketMetainfo_DefaultRedundancyRequiredShares(v int) BucketMetainfo_DefaultRedundancyRequiredShares_Field {
	f := BucketMetainfo_DefaultRedundancyRequiredShares_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultRedundancyRequiredShares_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultRedundancyRequiredShares_Field) _Column() string {
	return "default_redundancy_required_shares"
}

// BucketMetainfo_DefaultRedundancyRepairShares_Field wraps the
// "default_redundancy_repair_shares" column.
type BucketMetainfo_DefaultRedundancyRepairShares_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultRedundancyRepairShares returns a set, non-null field
// holding v.
func BucketMetainfo_DefaultRedundancyRepairShares(v int) BucketMetainfo_DefaultRedundancyRepairShares_Field {
	f := BucketMetainfo_DefaultRedundancyRepairShares_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultRedundancyRepairShares_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultRedundancyRepairShares_Field) _Column() string {
	return "default_redundancy_repair_shares"
}

// BucketMetainfo_DefaultRedundancyOptimalShares_Field wraps the
// "default_redundancy_optimal_shares" column.
type BucketMetainfo_DefaultRedundancyOptimalShares_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultRedundancyOptimalShares returns a set, non-null
// field holding v.
func BucketMetainfo_DefaultRedundancyOptimalShares(v int) BucketMetainfo_DefaultRedundancyOptimalShares_Field {
	f := BucketMetainfo_DefaultRedundancyOptimalShares_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultRedundancyOptimalShares_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultRedundancyOptimalShares_Field) _Column() string {
	return "default_redundancy_optimal_shares"
}

// BucketMetainfo_DefaultRedundancyTotalShares_Field wraps the
// "default_redundancy_total_shares" column.
type BucketMetainfo_DefaultRedundancyTotalShares_Field struct {
	_set   bool
	_null  bool
	_value int
}

// BucketMetainfo_DefaultRedundancyTotalShares returns a set, non-null field
// holding v.
func BucketMetainfo_DefaultRedundancyTotalShares(v int) BucketMetainfo_DefaultRedundancyTotalShares_Field {
	f := BucketMetainfo_DefaultRedundancyTotalShares_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f BucketMetainfo_DefaultRedundancyTotalShares_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (BucketMetainfo_DefaultRedundancyTotalShares_Field) _Column() string {
	return "default_redundancy_total_shares"
}
|
|
|
|
// ProjectInvoiceStamp records that an invoice covering a billing period was
// issued for a project.
type ProjectInvoiceStamp struct {
	ProjectId []byte
	InvoiceId []byte
	StartDate time.Time
	EndDate   time.Time
	CreatedAt time.Time
}

// _Table reports the backing table name.
func (ProjectInvoiceStamp) _Table() string {
	return "project_invoice_stamps"
}

// ProjectInvoiceStamp_Update_Fields is empty: the generated API does not
// update any column of this table.
type ProjectInvoiceStamp_Update_Fields struct {
}

// ProjectInvoiceStamp_ProjectId_Field wraps the "project_id" column.
type ProjectInvoiceStamp_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ProjectInvoiceStamp_ProjectId returns a set, non-null field holding v.
func ProjectInvoiceStamp_ProjectId(v []byte) ProjectInvoiceStamp_ProjectId_Field {
	f := ProjectInvoiceStamp_ProjectId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectInvoiceStamp_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectInvoiceStamp_ProjectId_Field) _Column() string {
	return "project_id"
}

// ProjectInvoiceStamp_InvoiceId_Field wraps the "invoice_id" column.
type ProjectInvoiceStamp_InvoiceId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ProjectInvoiceStamp_InvoiceId returns a set, non-null field holding v.
func ProjectInvoiceStamp_InvoiceId(v []byte) ProjectInvoiceStamp_InvoiceId_Field {
	f := ProjectInvoiceStamp_InvoiceId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectInvoiceStamp_InvoiceId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectInvoiceStamp_InvoiceId_Field) _Column() string {
	return "invoice_id"
}

// ProjectInvoiceStamp_StartDate_Field wraps the "start_date" column.
type ProjectInvoiceStamp_StartDate_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ProjectInvoiceStamp_StartDate returns a set, non-null field holding v.
func ProjectInvoiceStamp_StartDate(v time.Time) ProjectInvoiceStamp_StartDate_Field {
	f := ProjectInvoiceStamp_StartDate_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectInvoiceStamp_StartDate_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectInvoiceStamp_StartDate_Field) _Column() string {
	return "start_date"
}

// ProjectInvoiceStamp_EndDate_Field wraps the "end_date" column.
type ProjectInvoiceStamp_EndDate_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ProjectInvoiceStamp_EndDate returns a set, non-null field holding v.
func ProjectInvoiceStamp_EndDate(v time.Time) ProjectInvoiceStamp_EndDate_Field {
	f := ProjectInvoiceStamp_EndDate_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectInvoiceStamp_EndDate_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectInvoiceStamp_EndDate_Field) _Column() string {
	return "end_date"
}

// ProjectInvoiceStamp_CreatedAt_Field wraps the "created_at" column.
type ProjectInvoiceStamp_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ProjectInvoiceStamp_CreatedAt returns a set, non-null field holding v.
func ProjectInvoiceStamp_CreatedAt(v time.Time) ProjectInvoiceStamp_CreatedAt_Field {
	f := ProjectInvoiceStamp_CreatedAt_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectInvoiceStamp_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectInvoiceStamp_CreatedAt_Field) _Column() string {
	return "created_at"
}
|
|
|
|
// ProjectMember links a user (member) to a project.
type ProjectMember struct {
	MemberId  []byte
	ProjectId []byte
	CreatedAt time.Time
}

// _Table reports the backing table name.
func (ProjectMember) _Table() string {
	return "project_members"
}

// ProjectMember_Update_Fields is empty: the generated API does not update
// any column of this table.
type ProjectMember_Update_Fields struct {
}

// ProjectMember_MemberId_Field wraps the "member_id" column.
type ProjectMember_MemberId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ProjectMember_MemberId returns a set, non-null field holding v.
func ProjectMember_MemberId(v []byte) ProjectMember_MemberId_Field {
	f := ProjectMember_MemberId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectMember_MemberId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectMember_MemberId_Field) _Column() string {
	return "member_id"
}

// ProjectMember_ProjectId_Field wraps the "project_id" column.
type ProjectMember_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ProjectMember_ProjectId returns a set, non-null field holding v.
func ProjectMember_ProjectId(v []byte) ProjectMember_ProjectId_Field {
	f := ProjectMember_ProjectId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectMember_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectMember_ProjectId_Field) _Column() string {
	return "project_id"
}

// ProjectMember_CreatedAt_Field wraps the "created_at" column.
type ProjectMember_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ProjectMember_CreatedAt returns a set, non-null field holding v.
func ProjectMember_CreatedAt(v time.Time) ProjectMember_CreatedAt_Field {
	f := ProjectMember_CreatedAt_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f ProjectMember_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (ProjectMember_CreatedAt_Field) _Column() string {
	return "created_at"
}
|
|
|
|
// StripecoinpaymentsApplyBalanceIntent tracks the state of applying a coin
// payment transaction's balance to a Stripe account.
type StripecoinpaymentsApplyBalanceIntent struct {
	TxId      string
	State     int
	CreatedAt time.Time
}

// _Table reports the backing table name.
func (StripecoinpaymentsApplyBalanceIntent) _Table() string {
	return "stripecoinpayments_apply_balance_intents"
}

// StripecoinpaymentsApplyBalanceIntent_Update_Fields lists the columns the
// generated update calls may change.
type StripecoinpaymentsApplyBalanceIntent_Update_Fields struct {
	State StripecoinpaymentsApplyBalanceIntent_State_Field
}

// StripecoinpaymentsApplyBalanceIntent_TxId_Field wraps the "tx_id" column.
type StripecoinpaymentsApplyBalanceIntent_TxId_Field struct {
	_set   bool
	_null  bool
	_value string
}

// StripecoinpaymentsApplyBalanceIntent_TxId returns a set, non-null field
// holding v.
func StripecoinpaymentsApplyBalanceIntent_TxId(v string) StripecoinpaymentsApplyBalanceIntent_TxId_Field {
	f := StripecoinpaymentsApplyBalanceIntent_TxId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f StripecoinpaymentsApplyBalanceIntent_TxId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (StripecoinpaymentsApplyBalanceIntent_TxId_Field) _Column() string {
	return "tx_id"
}

// StripecoinpaymentsApplyBalanceIntent_State_Field wraps the "state" column.
type StripecoinpaymentsApplyBalanceIntent_State_Field struct {
	_set   bool
	_null  bool
	_value int
}

// StripecoinpaymentsApplyBalanceIntent_State returns a set, non-null field
// holding v.
func StripecoinpaymentsApplyBalanceIntent_State(v int) StripecoinpaymentsApplyBalanceIntent_State_Field {
	f := StripecoinpaymentsApplyBalanceIntent_State_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f StripecoinpaymentsApplyBalanceIntent_State_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (StripecoinpaymentsApplyBalanceIntent_State_Field) _Column() string {
	return "state"
}

// StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field wraps the
// "created_at" column.
type StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// StripecoinpaymentsApplyBalanceIntent_CreatedAt returns a set, non-null
// field holding v.
func StripecoinpaymentsApplyBalanceIntent_CreatedAt(v time.Time) StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field {
	f := StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field) _Column() string {
	return "created_at"
}
|
|
|
|
// UsedSerial marks a serial number as consumed by a particular storage node.
type UsedSerial struct {
	SerialNumberId int
	StorageNodeId  []byte
}

// _Table reports the backing table name.
func (UsedSerial) _Table() string {
	return "used_serials"
}

// UsedSerial_Update_Fields is empty: the generated API does not update any
// column of this table.
type UsedSerial_Update_Fields struct {
}

// UsedSerial_SerialNumberId_Field wraps the "serial_number_id" column.
type UsedSerial_SerialNumberId_Field struct {
	_set   bool
	_null  bool
	_value int
}

// UsedSerial_SerialNumberId returns a set, non-null field holding v.
func UsedSerial_SerialNumberId(v int) UsedSerial_SerialNumberId_Field {
	f := UsedSerial_SerialNumberId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UsedSerial_SerialNumberId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UsedSerial_SerialNumberId_Field) _Column() string {
	return "serial_number_id"
}

// UsedSerial_StorageNodeId_Field wraps the "storage_node_id" column.
type UsedSerial_StorageNodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// UsedSerial_StorageNodeId returns a set, non-null field holding v.
func UsedSerial_StorageNodeId(v []byte) UsedSerial_StorageNodeId_Field {
	f := UsedSerial_StorageNodeId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UsedSerial_StorageNodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UsedSerial_StorageNodeId_Field) _Column() string {
	return "storage_node_id"
}
|
|
|
|
// UserCredit is one credit grant for a user, earned through an offer and
// optionally attributed to a referrer.
type UserCredit struct {
	Id                   int
	UserId               []byte
	OfferId              int
	ReferredBy           []byte
	Type                 string
	CreditsEarnedInCents int
	CreditsUsedInCents   int
	ExpiresAt            time.Time
	CreatedAt            time.Time
}

// _Table reports the backing table name.
func (UserCredit) _Table() string {
	return "user_credits"
}

// UserCredit_Create_Fields holds the optional columns for row creation.
type UserCredit_Create_Fields struct {
	ReferredBy UserCredit_ReferredBy_Field
}

// UserCredit_Update_Fields lists the columns the generated update calls may
// change.
type UserCredit_Update_Fields struct {
	CreditsUsedInCents UserCredit_CreditsUsedInCents_Field
	ExpiresAt          UserCredit_ExpiresAt_Field
}

// UserCredit_Id_Field wraps the "id" column.
type UserCredit_Id_Field struct {
	_set   bool
	_null  bool
	_value int
}

// UserCredit_Id returns a set, non-null field holding v.
func UserCredit_Id(v int) UserCredit_Id_Field {
	f := UserCredit_Id_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_Id_Field) _Column() string {
	return "id"
}

// UserCredit_UserId_Field wraps the "user_id" column.
type UserCredit_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// UserCredit_UserId returns a set, non-null field holding v.
func UserCredit_UserId(v []byte) UserCredit_UserId_Field {
	f := UserCredit_UserId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_UserId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_UserId_Field) _Column() string {
	return "user_id"
}

// UserCredit_OfferId_Field wraps the "offer_id" column.
type UserCredit_OfferId_Field struct {
	_set   bool
	_null  bool
	_value int
}

// UserCredit_OfferId returns a set, non-null field holding v.
func UserCredit_OfferId(v int) UserCredit_OfferId_Field {
	f := UserCredit_OfferId_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_OfferId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_OfferId_Field) _Column() string {
	return "offer_id"
}

// UserCredit_ReferredBy_Field wraps the nullable "referred_by" column.
type UserCredit_ReferredBy_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// UserCredit_ReferredBy returns a set, non-null field holding v.
func UserCredit_ReferredBy(v []byte) UserCredit_ReferredBy_Field {
	f := UserCredit_ReferredBy_Field{}
	f._set, f._value = true, v
	return f
}

// UserCredit_ReferredBy_Raw maps a nil slice to the NULL field and any other
// slice to a regular value field.
func UserCredit_ReferredBy_Raw(v []byte) UserCredit_ReferredBy_Field {
	if v != nil {
		return UserCredit_ReferredBy(v)
	}
	return UserCredit_ReferredBy_Null()
}

// UserCredit_ReferredBy_Null returns a field representing SQL NULL.
func UserCredit_ReferredBy_Null() UserCredit_ReferredBy_Field {
	f := UserCredit_ReferredBy_Field{}
	f._set, f._null = true, true
	return f
}

// isnull reports whether the field would be stored as NULL.
func (f UserCredit_ReferredBy_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_ReferredBy_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_ReferredBy_Field) _Column() string {
	return "referred_by"
}

// UserCredit_Type_Field wraps the "type" column.
type UserCredit_Type_Field struct {
	_set   bool
	_null  bool
	_value string
}

// UserCredit_Type returns a set, non-null field holding v.
func UserCredit_Type(v string) UserCredit_Type_Field {
	f := UserCredit_Type_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_Type_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_Type_Field) _Column() string {
	return "type"
}

// UserCredit_CreditsEarnedInCents_Field wraps the "credits_earned_in_cents"
// column.
type UserCredit_CreditsEarnedInCents_Field struct {
	_set   bool
	_null  bool
	_value int
}

// UserCredit_CreditsEarnedInCents returns a set, non-null field holding v.
func UserCredit_CreditsEarnedInCents(v int) UserCredit_CreditsEarnedInCents_Field {
	f := UserCredit_CreditsEarnedInCents_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_CreditsEarnedInCents_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_CreditsEarnedInCents_Field) _Column() string {
	return "credits_earned_in_cents"
}

// UserCredit_CreditsUsedInCents_Field wraps the "credits_used_in_cents"
// column.
type UserCredit_CreditsUsedInCents_Field struct {
	_set   bool
	_null  bool
	_value int
}

// UserCredit_CreditsUsedInCents returns a set, non-null field holding v.
func UserCredit_CreditsUsedInCents(v int) UserCredit_CreditsUsedInCents_Field {
	f := UserCredit_CreditsUsedInCents_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_CreditsUsedInCents_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_CreditsUsedInCents_Field) _Column() string {
	return "credits_used_in_cents"
}

// UserCredit_ExpiresAt_Field wraps the "expires_at" column.
type UserCredit_ExpiresAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// UserCredit_ExpiresAt returns a set, non-null field holding v.
func UserCredit_ExpiresAt(v time.Time) UserCredit_ExpiresAt_Field {
	f := UserCredit_ExpiresAt_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_ExpiresAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_ExpiresAt_Field) _Column() string {
	return "expires_at"
}

// UserCredit_CreatedAt_Field wraps the "created_at" column.
type UserCredit_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// UserCredit_CreatedAt returns a set, non-null field holding v.
func UserCredit_CreatedAt(v time.Time) UserCredit_CreatedAt_Field {
	f := UserCredit_CreatedAt_Field{}
	f._set, f._value = true, v
	return f
}

// value returns the driver-ready value, or nil when the field is unset or
// explicitly null.
func (f UserCredit_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the column name this field binds to.
func (UserCredit_CreatedAt_Field) _Column() string {
	return "created_at"
}
|
|
|
|
func toUTC(t time.Time) time.Time {
|
|
return t.UTC()
|
|
}
|
|
|
|
func toDate(t time.Time) time.Time {
|
|
// keep up the minute portion so that translations between timezones will
|
|
// continue to reflect properly.
|
|
return t.Truncate(time.Minute)
|
|
}
|
|
|
|
//
// runtime support for building sql statements
//

// __sqlbundle_SQL is a renderable fragment of a SQL statement. The private
// method restricts implementations to the types defined in this bundle.
type __sqlbundle_SQL interface {
	Render() string

	private()
}

// __sqlbundle_Dialect rewrites generic "?" placeholder syntax into the form
// a specific database engine expects.
type __sqlbundle_Dialect interface {
	Rebind(sql string) string
}

// __sqlbundle_RenderOp adjusts how __sqlbundle_Render post-processes the
// rendered SQL text.
type __sqlbundle_RenderOp int

const (
	// __sqlbundle_NoFlatten disables whitespace flattening of the output.
	__sqlbundle_NoFlatten __sqlbundle_RenderOp = iota
	// __sqlbundle_NoTerminate disables appending the trailing semicolon.
	__sqlbundle_NoTerminate
)
|
|
|
|
func __sqlbundle_Render(dialect __sqlbundle_Dialect, sql __sqlbundle_SQL, ops ...__sqlbundle_RenderOp) string {
|
|
out := sql.Render()
|
|
|
|
flatten := true
|
|
terminate := true
|
|
for _, op := range ops {
|
|
switch op {
|
|
case __sqlbundle_NoFlatten:
|
|
flatten = false
|
|
case __sqlbundle_NoTerminate:
|
|
terminate = false
|
|
}
|
|
}
|
|
|
|
if flatten {
|
|
out = __sqlbundle_flattenSQL(out)
|
|
}
|
|
if terminate {
|
|
out += ";"
|
|
}
|
|
|
|
return dialect.Rebind(out)
|
|
}
|
|
|
|
// __sqlbundle_flattenSQL trims spaces, tabs, and newlines from both ends of
// x and collapses every interior run of that whitespace into a single
// space. When x needs no fixing it is returned unchanged without
// allocating.
func __sqlbundle_flattenSQL(x string) string {
	// trim whitespace from beginning and end
	s, e := 0, len(x)-1
	for s < len(x) && (x[s] == ' ' || x[s] == '\t' || x[s] == '\n') {
		s++
	}
	for s <= e && (x[e] == ' ' || x[e] == '\t' || x[e] == '\n') {
		e--
	}
	if s > e {
		// the string was entirely whitespace
		return ""
	}
	x = x[s : e+1]

	// check for whitespace that needs fixing
	wasSpace := false
	for i := 0; i < len(x); i++ {
		r := x[i]
		justSpace := r == ' '
		if (wasSpace && justSpace) || r == '\t' || r == '\n' {
			// whitespace detected, start writing a new string
			var result strings.Builder
			result.Grow(len(x))
			if wasSpace {
				// back up one byte so the space preceding this run is not
				// duplicated by the token loop below
				result.WriteString(x[:i-1])
			} else {
				result.WriteString(x[:i])
			}
			// from here on, alternate writing a single space and the next
			// non-whitespace token until the end of the string
			for p := i; p < len(x); p++ {
				for p < len(x) && (x[p] == ' ' || x[p] == '\t' || x[p] == '\n') {
					p++
				}
				result.WriteByte(' ')

				start := p
				for p < len(x) && !(x[p] == ' ' || x[p] == '\t' || x[p] == '\n') {
					p++
				}
				result.WriteString(x[start:p])
			}

			return result.String()
		}
		wasSpace = justSpace
	}

	// no problematic whitespace found
	return x
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_postgres struct{}

// Rebind rewrites each "?" placeholder into PostgreSQL's positional "$N"
// form, numbering from $1 in order of appearance. Placeholders inside
// single-quoted string literals, double-quoted identifiers, and "--" line
// comments are left untouched.
func (p __sqlbundle_postgres) Rebind(sql string) string {
	type sqlParseState int
	const (
		sqlParseStart sqlParseState = iota
		sqlParseInStringLiteral
		sqlParseInQuotedIdentifier
		sqlParseInComment
	)

	out := make([]byte, 0, len(sql)+10)

	j := 1
	state := sqlParseStart
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch state {
		case sqlParseStart:
			switch ch {
			case '?':
				out = append(out, '$')
				out = append(out, strconv.Itoa(j)...)
				state = sqlParseStart
				j++
				// skip the shared append below: the '?' byte is replaced
				// by the "$N" text already written.
				continue
			case '-':
				// only a second '-' starts a comment
				if i+1 < len(sql) && sql[i+1] == '-' {
					state = sqlParseInComment
				}
			case '"':
				state = sqlParseInQuotedIdentifier
			case '\'':
				state = sqlParseInStringLiteral
			}
		case sqlParseInStringLiteral:
			if ch == '\'' {
				state = sqlParseStart
			}
		case sqlParseInQuotedIdentifier:
			if ch == '"' {
				state = sqlParseStart
			}
		case sqlParseInComment:
			if ch == '\n' {
				state = sqlParseStart
			}
		}
		out = append(out, ch)
	}

	return string(out)
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_sqlite3 struct{}

// Rebind is a no-op: sqlite3 accepts "?" placeholders natively.
func (s __sqlbundle_sqlite3) Rebind(sql string) string {
	return sql
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_cockroach struct{}

// Rebind rewrites each "?" placeholder into the positional "$N" form,
// numbering from $1 in order of appearance, exactly mirroring the postgres
// dialect above (CockroachDB uses the PostgreSQL wire protocol).
// Placeholders inside single-quoted string literals, double-quoted
// identifiers, and "--" line comments are left untouched.
func (p __sqlbundle_cockroach) Rebind(sql string) string {
	type sqlParseState int
	const (
		sqlParseStart sqlParseState = iota
		sqlParseInStringLiteral
		sqlParseInQuotedIdentifier
		sqlParseInComment
	)

	out := make([]byte, 0, len(sql)+10)

	j := 1
	state := sqlParseStart
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch state {
		case sqlParseStart:
			switch ch {
			case '?':
				out = append(out, '$')
				out = append(out, strconv.Itoa(j)...)
				state = sqlParseStart
				j++
				// skip the shared append below: the '?' byte is replaced
				// by the "$N" text already written.
				continue
			case '-':
				// only a second '-' starts a comment
				if i+1 < len(sql) && sql[i+1] == '-' {
					state = sqlParseInComment
				}
			case '"':
				state = sqlParseInQuotedIdentifier
			case '\'':
				state = sqlParseInStringLiteral
			}
		case sqlParseInStringLiteral:
			if ch == '\'' {
				state = sqlParseStart
			}
		case sqlParseInQuotedIdentifier:
			if ch == '"' {
				state = sqlParseStart
			}
		case sqlParseInComment:
			if ch == '\n' {
				state = sqlParseStart
			}
		}
		out = append(out, ch)
	}

	return string(out)
}
|
|
|
|
// __sqlbundle_Literal is a fixed fragment of SQL text.
type __sqlbundle_Literal string

func (__sqlbundle_Literal) private() {}

// Render returns the literal SQL text unchanged.
func (l __sqlbundle_Literal) Render() string { return string(l) }
|
|
|
|
type __sqlbundle_Literals struct {
|
|
Join string
|
|
SQLs []__sqlbundle_SQL
|
|
}
|
|
|
|
func (__sqlbundle_Literals) private() {}
|
|
|
|
func (l __sqlbundle_Literals) Render() string {
|
|
var out bytes.Buffer
|
|
|
|
first := true
|
|
for _, sql := range l.SQLs {
|
|
if sql == nil {
|
|
continue
|
|
}
|
|
if !first {
|
|
out.WriteString(l.Join)
|
|
}
|
|
first = false
|
|
out.WriteString(sql.Render())
|
|
}
|
|
|
|
return out.String()
|
|
}
|
|
|
|
// __sqlbundle_Condition renders an equality or inequality comparison,
// switching to "is null" / "is not null" form when the runtime value is
// NULL.
type __sqlbundle_Condition struct {
	// set at compile/embed time
	Name  string
	Left  string
	Equal bool
	Right string

	// set at runtime
	Null bool
}

func (*__sqlbundle_Condition) private() {}

// Render produces the SQL text for the comparison.
func (c *__sqlbundle_Condition) Render() string {
	// TODO(jeff): maybe check if we can use placeholders instead of the
	// literal null: this would make the templates easier.

	if c.Null {
		if c.Equal {
			return c.Left + " is null"
		}
		return c.Left + " is not null"
	}
	if c.Equal {
		return c.Left + " = " + c.Right
	}
	return c.Left + " != " + c.Right
}
|
|
|
|
// __sqlbundle_Hole is a placeholder slot in a SQL template whose content is
// plugged in at runtime.
type __sqlbundle_Hole struct {
	// set at compile/embed time
	Name string

	// set at runtime
	SQL __sqlbundle_SQL
}

func (*__sqlbundle_Hole) private() {}

// Render renders the SQL currently plugged into the hole; it panics if no
// SQL has been assigned.
func (h *__sqlbundle_Hole) Render() string { return h.SQL.Render() }
|
|
|
|
//
|
|
// end runtime support for building sql statements
|
|
//
|
|
|
|
// The *_Row types below carry results of generated queries that select only
// a subset of columns rather than a full model struct.

type CustomerId_Row struct {
	CustomerId string
}

type Id_Address_LastContactSuccess_LastContactFailure_Row struct {
	Id                 []byte
	Address            string
	LastContactSuccess time.Time
	LastContactFailure time.Time
}

type Id_LastNet_Address_Protocol_Row struct {
	Id       []byte
	LastNet  string
	Address  string
	Protocol int
}

type Id_PieceCount_Row struct {
	Id         []byte
	PieceCount int64
}

type Id_Row struct {
	Id []byte
}

type LeafSerialNumber_Row struct {
	LeafSerialNumber []byte
}

type UsageLimit_Row struct {
	UsageLimit int64
}

type Value_Row struct {
	Value time.Time
}
|
|
|
|
// Create_ValueAttribution inserts a value_attributions row built from the
// given fields, stamping last_updated with the hook-provided current time,
// and returns the row as stored by the database.
func (obj *postgresImpl) Create_ValueAttribution(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field,
	value_attribution_partner_id ValueAttribution_PartnerId_Field) (
	value_attribution *ValueAttribution, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__project_id_val := value_attribution_project_id.value()
	__bucket_name_val := value_attribution_bucket_name.value()
	__partner_id_val := value_attribution_partner_id.value()
	// __now is already UTC; the extra UTC call is a harmless generated artifact.
	__last_updated_val := __now.UTC()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, last_updated ) VALUES ( ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val)

	// scan the RETURNING clause back into a fresh model struct
	value_attribution = &ValueAttribution{}
	err = obj.driver.QueryRow(__stmt, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return value_attribution, nil

}
|
|
|
|
// Create_PendingAudits inserts a pending_audits row built from the given
// fields and returns the row as stored by the database.
func (obj *postgresImpl) Create_PendingAudits(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	pending_audits_piece_id PendingAudits_PieceId_Field,
	pending_audits_stripe_index PendingAudits_StripeIndex_Field,
	pending_audits_share_size PendingAudits_ShareSize_Field,
	pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field,
	pending_audits_reverify_count PendingAudits_ReverifyCount_Field,
	pending_audits_path PendingAudits_Path_Field) (
	pending_audits *PendingAudits, err error) {
	defer mon.Task()(&ctx)(&err)
	__node_id_val := pending_audits_node_id.value()
	__piece_id_val := pending_audits_piece_id.value()
	__stripe_index_val := pending_audits_stripe_index.value()
	__share_size_val := pending_audits_share_size.value()
	__expected_share_hash_val := pending_audits_expected_share_hash.value()
	__reverify_count_val := pending_audits_reverify_count.value()
	__path_val := pending_audits_path.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO pending_audits ( node_id, piece_id, stripe_index, share_size, expected_share_hash, reverify_count, path ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val, __path_val)

	// scan the RETURNING clause back into a fresh model struct
	pending_audits = &PendingAudits{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val, __path_val).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return pending_audits, nil

}
|
|
|
|
// CreateNoReturn_Irreparabledb inserts an irreparabledbs row built from the
// given fields without reading the stored row back.
func (obj *postgresImpl) CreateNoReturn_Irreparabledb(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
	irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
	irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
	irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__segmentpath_val := irreparabledb_segmentpath.value()
	__segmentdetail_val := irreparabledb_segmentdetail.value()
	__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
	__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
	__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)

	// no RETURNING clause: only the execution error is reported
	_, err = obj.driver.Exec(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_AccountingTimestamps inserts an accounting_timestamps row
// built from the given fields without reading the stored row back.
func (obj *postgresImpl) CreateNoReturn_AccountingTimestamps(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	accounting_timestamps_value AccountingTimestamps_Value_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__name_val := accounting_timestamps_name.value()
	__value_val := accounting_timestamps_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __name_val, __value_val)

	// no RETURNING clause: only the execution error is reported
	_, err = obj.driver.Exec(__stmt, __name_val, __value_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_AccountingRollup(ctx context.Context,
|
|
accounting_rollup_node_id AccountingRollup_NodeId_Field,
|
|
accounting_rollup_start_time AccountingRollup_StartTime_Field,
|
|
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
|
|
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
|
|
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
|
|
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
|
|
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
|
|
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__node_id_val := accounting_rollup_node_id.value()
|
|
__start_time_val := accounting_rollup_start_time.value()
|
|
__put_total_val := accounting_rollup_put_total.value()
|
|
__get_total_val := accounting_rollup_get_total.value()
|
|
__get_audit_total_val := accounting_rollup_get_audit_total.value()
|
|
__get_repair_total_val := accounting_rollup_get_repair_total.value()
|
|
__put_repair_total_val := accounting_rollup_put_repair_total.value()
|
|
__at_rest_total_val := accounting_rollup_at_rest_total.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_address Node_Address_Field,
|
|
node_last_net Node_LastNet_Field,
|
|
node_protocol Node_Protocol_Field,
|
|
node_type Node_Type_Field,
|
|
node_email Node_Email_Field,
|
|
node_wallet Node_Wallet_Field,
|
|
node_free_bandwidth Node_FreeBandwidth_Field,
|
|
node_free_disk Node_FreeDisk_Field,
|
|
node_major Node_Major_Field,
|
|
node_minor Node_Minor_Field,
|
|
node_patch Node_Patch_Field,
|
|
node_hash Node_Hash_Field,
|
|
node_timestamp Node_Timestamp_Field,
|
|
node_release Node_Release_Field,
|
|
node_latency_90 Node_Latency90_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_last_contact_success Node_LastContactSuccess_Field,
|
|
node_last_contact_failure Node_LastContactFailure_Field,
|
|
node_contained Node_Contained_Field,
|
|
node_audit_reputation_alpha Node_AuditReputationAlpha_Field,
|
|
node_audit_reputation_beta Node_AuditReputationBeta_Field,
|
|
node_uptime_reputation_alpha Node_UptimeReputationAlpha_Field,
|
|
node_uptime_reputation_beta Node_UptimeReputationBeta_Field,
|
|
node_exit_success Node_ExitSuccess_Field,
|
|
optional Node_Create_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := node_id.value()
|
|
__address_val := node_address.value()
|
|
__last_net_val := node_last_net.value()
|
|
__protocol_val := node_protocol.value()
|
|
__type_val := node_type.value()
|
|
__email_val := node_email.value()
|
|
__wallet_val := node_wallet.value()
|
|
__free_bandwidth_val := node_free_bandwidth.value()
|
|
__free_disk_val := node_free_disk.value()
|
|
__piece_count_val := int64(0)
|
|
__major_val := node_major.value()
|
|
__minor_val := node_minor.value()
|
|
__patch_val := node_patch.value()
|
|
__hash_val := node_hash.value()
|
|
__timestamp_val := node_timestamp.value()
|
|
__release_val := node_release.value()
|
|
__latency_90_val := node_latency_90.value()
|
|
__audit_success_count_val := node_audit_success_count.value()
|
|
__total_audit_count_val := node_total_audit_count.value()
|
|
__uptime_success_count_val := node_uptime_success_count.value()
|
|
__total_uptime_count_val := node_total_uptime_count.value()
|
|
__created_at_val := __now
|
|
__updated_at_val := __now
|
|
__last_contact_success_val := node_last_contact_success.value()
|
|
__last_contact_failure_val := node_last_contact_failure.value()
|
|
__contained_val := node_contained.value()
|
|
__disqualified_val := optional.Disqualified.value()
|
|
__audit_reputation_alpha_val := node_audit_reputation_alpha.value()
|
|
__audit_reputation_beta_val := node_audit_reputation_beta.value()
|
|
__uptime_reputation_alpha_val := node_uptime_reputation_alpha.value()
|
|
__uptime_reputation_beta_val := node_uptime_reputation_beta.value()
|
|
__exit_initiated_at_val := optional.ExitInitiatedAt.value()
|
|
__exit_loop_completed_at_val := optional.ExitLoopCompletedAt.value()
|
|
__exit_finished_at_val := optional.ExitFinishedAt.value()
|
|
__exit_success_val := node_exit_success.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, last_net, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_email User_Email_Field,
|
|
user_normalized_email User_NormalizedEmail_Field,
|
|
user_full_name User_FullName_Field,
|
|
user_password_hash User_PasswordHash_Field,
|
|
optional User_Create_Fields) (
|
|
user *User, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := user_id.value()
|
|
__email_val := user_email.value()
|
|
__normalized_email_val := user_normalized_email.value()
|
|
__full_name_val := user_full_name.value()
|
|
__short_name_val := optional.ShortName.value()
|
|
__password_hash_val := user_password_hash.value()
|
|
__status_val := int(0)
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val)
|
|
|
|
user = &User{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field,
|
|
project_usage_limit Project_UsageLimit_Field,
|
|
project_owner_id Project_OwnerId_Field,
|
|
optional Project_Create_Fields) (
|
|
project *Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := project_id.value()
|
|
__name_val := project_name.value()
|
|
__description_val := project_description.value()
|
|
__usage_limit_val := project_usage_limit.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__owner_id_val := project_owner_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val)
|
|
|
|
project = &Project{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_ProjectInvoiceStamp(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
|
|
project_invoice_stamp_invoice_id ProjectInvoiceStamp_InvoiceId_Field,
|
|
project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field,
|
|
project_invoice_stamp_end_date ProjectInvoiceStamp_EndDate_Field,
|
|
project_invoice_stamp_created_at ProjectInvoiceStamp_CreatedAt_Field) (
|
|
project_invoice_stamp *ProjectInvoiceStamp, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__project_id_val := project_invoice_stamp_project_id.value()
|
|
__invoice_id_val := project_invoice_stamp_invoice_id.value()
|
|
__start_date_val := project_invoice_stamp_start_date.value()
|
|
__end_date_val := project_invoice_stamp_end_date.value()
|
|
__created_at_val := project_invoice_stamp_created_at.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invoice_stamps ( project_id, invoice_id, start_date, end_date, created_at ) VALUES ( ?, ?, ?, ?, ? ) RETURNING project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __project_id_val, __invoice_id_val, __start_date_val, __end_date_val, __created_at_val)
|
|
|
|
project_invoice_stamp = &ProjectInvoiceStamp{}
|
|
err = obj.driver.QueryRow(__stmt, __project_id_val, __invoice_id_val, __start_date_val, __end_date_val, __created_at_val).Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project_invoice_stamp, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__member_id_val := project_member_member_id.value()
|
|
__project_id_val := project_member_project_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? ) RETURNING project_members.member_id, project_members.project_id, project_members.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __member_id_val, __project_id_val, __created_at_val)
|
|
|
|
project_member = &ProjectMember{}
|
|
err = obj.driver.QueryRow(__stmt, __member_id_val, __project_id_val, __created_at_val).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project_member, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_head ApiKey_Head_Field,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_secret ApiKey_Secret_Field,
|
|
optional ApiKey_Create_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := api_key_id.value()
|
|
__project_id_val := api_key_project_id.value()
|
|
__head_val := api_key_head.value()
|
|
__name_val := api_key_name.value()
|
|
__secret_val := api_key_secret.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field,
|
|
serial_number_bucket_id SerialNumber_BucketId_Field,
|
|
serial_number_expires_at SerialNumber_ExpiresAt_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__serial_number_val := serial_number_serial_number.value()
|
|
__bucket_id_val := serial_number_bucket_id.value()
|
|
__expires_at_val := serial_number_expires_at.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO serial_numbers ( serial_number, bucket_id, expires_at ) VALUES ( ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_UsedSerial(ctx context.Context,
|
|
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
|
|
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__serial_number_id_val := used_serial_serial_number_id.value()
|
|
__storage_node_id_val := used_serial_storage_node_id.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO used_serials ( serial_number_id, storage_node_id ) VALUES ( ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __serial_number_id_val, __storage_node_id_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __serial_number_id_val, __storage_node_id_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__bucket_name_val := bucket_storage_tally_bucket_name.value()
|
|
__project_id_val := bucket_storage_tally_project_id.value()
|
|
__interval_start_val := bucket_storage_tally_interval_start.value()
|
|
__inline_val := bucket_storage_tally_inline.value()
|
|
__remote_val := bucket_storage_tally_remote.value()
|
|
__remote_segments_count_val := bucket_storage_tally_remote_segments_count.value()
|
|
__inline_segments_count_val := bucket_storage_tally_inline_segments_count.value()
|
|
__object_count_val := bucket_storage_tally_object_count.value()
|
|
__metadata_size_val := bucket_storage_tally_metadata_size.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_StoragenodeStorageTally(ctx context.Context,
|
|
storagenode_storage_tally_node_id StoragenodeStorageTally_NodeId_Field,
|
|
storagenode_storage_tally_interval_end_time StoragenodeStorageTally_IntervalEndTime_Field,
|
|
storagenode_storage_tally_data_total StoragenodeStorageTally_DataTotal_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__node_id_val := storagenode_storage_tally_node_id.value()
|
|
__interval_end_time_val := storagenode_storage_tally_interval_end_time.value()
|
|
__data_total_val := storagenode_storage_tally_data_total.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO storagenode_storage_tallies ( node_id, interval_end_time, data_total ) VALUES ( ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __interval_end_time_val, __data_total_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := peer_identity_node_id.value()
|
|
__leaf_serial_number_val := peer_identity_leaf_serial_number.value()
|
|
__chain_val := peer_identity_chain.value()
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__secret_val := registration_token_secret.value()
|
|
__owner_id_val := optional.OwnerId.value()
|
|
__project_limit_val := registration_token_project_limit.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__secret_val := reset_password_token_secret.value()
|
|
__owner_id_val := reset_password_token_owner_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO reset_password_tokens ( secret, owner_id, created_at ) VALUES ( ?, ?, ? ) RETURNING reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __secret_val, __owner_id_val, __created_at_val)
|
|
|
|
reset_password_token = &ResetPasswordToken{}
|
|
err = obj.driver.QueryRow(__stmt, __secret_val, __owner_id_val, __created_at_val).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return reset_password_token, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_Offer(ctx context.Context,
|
|
offer_name Offer_Name_Field,
|
|
offer_description Offer_Description_Field,
|
|
offer_award_credit_in_cents Offer_AwardCreditInCents_Field,
|
|
offer_invitee_credit_in_cents Offer_InviteeCreditInCents_Field,
|
|
offer_expires_at Offer_ExpiresAt_Field,
|
|
offer_status Offer_Status_Field,
|
|
offer_type Offer_Type_Field,
|
|
optional Offer_Create_Fields) (
|
|
offer *Offer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__name_val := offer_name.value()
|
|
__description_val := offer_description.value()
|
|
__award_credit_in_cents_val := offer_award_credit_in_cents.value()
|
|
__invitee_credit_in_cents_val := offer_invitee_credit_in_cents.value()
|
|
__award_credit_duration_days_val := optional.AwardCreditDurationDays.value()
|
|
__invitee_credit_duration_days_val := optional.InviteeCreditDurationDays.value()
|
|
__redeemable_cap_val := optional.RedeemableCap.value()
|
|
__expires_at_val := offer_expires_at.value()
|
|
__created_at_val := __now
|
|
__status_val := offer_status.value()
|
|
__type_val := offer_type.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO offers ( name, description, award_credit_in_cents, invitee_credit_in_cents, award_credit_duration_days, invitee_credit_duration_days, redeemable_cap, expires_at, created_at, status, type ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __name_val, __description_val, __award_credit_in_cents_val, __invitee_credit_in_cents_val, __award_credit_duration_days_val, __invitee_credit_duration_days_val, __redeemable_cap_val, __expires_at_val, __created_at_val, __status_val, __type_val)
|
|
|
|
offer = &Offer{}
|
|
err = obj.driver.QueryRow(__stmt, __name_val, __description_val, __award_credit_in_cents_val, __invitee_credit_in_cents_val, __award_credit_duration_days_val, __invitee_credit_duration_days_val, __redeemable_cap_val, __expires_at_val, __created_at_val, __status_val, __type_val).Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return offer, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_UserCredit(ctx context.Context,
|
|
user_credit_user_id UserCredit_UserId_Field,
|
|
user_credit_offer_id UserCredit_OfferId_Field,
|
|
user_credit_type UserCredit_Type_Field,
|
|
user_credit_credits_earned_in_cents UserCredit_CreditsEarnedInCents_Field,
|
|
user_credit_expires_at UserCredit_ExpiresAt_Field,
|
|
optional UserCredit_Create_Fields) (
|
|
user_credit *UserCredit, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__user_id_val := user_credit_user_id.value()
|
|
__offer_id_val := user_credit_offer_id.value()
|
|
__referred_by_val := optional.ReferredBy.value()
|
|
__type_val := user_credit_type.value()
|
|
__credits_earned_in_cents_val := user_credit_credits_earned_in_cents.value()
|
|
__credits_used_in_cents_val := int(0)
|
|
__expires_at_val := user_credit_expires_at.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO user_credits ( user_id, offer_id, referred_by, type, credits_earned_in_cents, credits_used_in_cents, expires_at, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING user_credits.id, user_credits.user_id, user_credits.offer_id, user_credits.referred_by, user_credits.type, user_credits.credits_earned_in_cents, user_credits.credits_used_in_cents, user_credits.expires_at, user_credits.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __user_id_val, __offer_id_val, __referred_by_val, __type_val, __credits_earned_in_cents_val, __credits_used_in_cents_val, __expires_at_val, __created_at_val)
|
|
|
|
user_credit = &UserCredit{}
|
|
err = obj.driver.QueryRow(__stmt, __user_id_val, __offer_id_val, __referred_by_val, __type_val, __credits_earned_in_cents_val, __credits_used_in_cents_val, __expires_at_val, __created_at_val).Scan(&user_credit.Id, &user_credit.UserId, &user_credit.OfferId, &user_credit.ReferredBy, &user_credit.Type, &user_credit.CreditsEarnedInCents, &user_credit.CreditsUsedInCents, &user_credit.ExpiresAt, &user_credit.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user_credit, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := bucket_metainfo_id.value()
|
|
__project_id_val := bucket_metainfo_project_id.value()
|
|
__name_val := bucket_metainfo_name.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__path_cipher_val := bucket_metainfo_path_cipher.value()
|
|
__created_at_val := __now
|
|
__default_segment_size_val := bucket_metainfo_default_segment_size.value()
|
|
__default_encryption_cipher_suite_val := bucket_metainfo_default_encryption_cipher_suite.value()
|
|
__default_encryption_block_size_val := bucket_metainfo_default_encryption_block_size.value()
|
|
__default_redundancy_algorithm_val := bucket_metainfo_default_redundancy_algorithm.value()
|
|
__default_redundancy_share_size_val := bucket_metainfo_default_redundancy_share_size.value()
|
|
__default_redundancy_required_shares_val := bucket_metainfo_default_redundancy_required_shares.value()
|
|
__default_redundancy_repair_shares_val := bucket_metainfo_default_redundancy_repair_shares.value()
|
|
__default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value()
|
|
__default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val)
|
|
|
|
bucket_metainfo = &BucketMetainfo{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bucket_metainfo, nil
|
|
|
|
}
|
|
|
|
// CreateNoReturn_GracefulExitProgress inserts a graceful_exit_progress row
// and discards the inserted row, returning only an error.
//
// pieces_transferred and pieces_failed are initialized to zero, and
// updated_at is taken from the db clock hook (UTC). Errors from the driver
// are wrapped via obj.makeErr.
func (obj *postgresImpl) CreateNoReturn_GracefulExitProgress(ctx context.Context,
	graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
	graceful_exit_progress_bytes_transferred GracefulExitProgress_BytesTransferred_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := graceful_exit_progress_node_id.value()
	__bytes_transferred_val := graceful_exit_progress_bytes_transferred.value()
	__pieces_transferred_val := int64(0)
	__pieces_failed_val := int64(0)
	__updated_at_val := __now.UTC()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO graceful_exit_progress ( node_id, bytes_transferred, pieces_transferred, pieces_failed, updated_at ) VALUES ( ?, ?, ?, ?, ? )")

	// Render the dialect-specific SQL (placeholder style etc.) and log it.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __bytes_transferred_val, __pieces_transferred_val, __pieces_failed_val, __updated_at_val)

	// No RETURNING clause: Exec is sufficient since nothing is read back.
	_, err = obj.driver.Exec(__stmt, __node_id_val, __bytes_transferred_val, __pieces_transferred_val, __pieces_failed_val, __updated_at_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_GracefulExitTransferQueue inserts a graceful_exit_transfer_queue
// row and discards the inserted row, returning only an error.
//
// queued_at is taken from the db clock hook (UTC); root_piece_id, requested_at,
// last_failed_at, last_failed_code, failed_count, and finished_at come from the
// optional create fields and may carry their zero/NULL values.
func (obj *postgresImpl) CreateNoReturn_GracefulExitTransferQueue(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
	graceful_exit_transfer_queue_durability_ratio GracefulExitTransferQueue_DurabilityRatio_Field,
	graceful_exit_transfer_queue_order_limit_send_count GracefulExitTransferQueue_OrderLimitSendCount_Field,
	optional GracefulExitTransferQueue_Create_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := graceful_exit_transfer_queue_node_id.value()
	__path_val := graceful_exit_transfer_queue_path.value()
	__piece_num_val := graceful_exit_transfer_queue_piece_num.value()
	__root_piece_id_val := optional.RootPieceId.value()
	__durability_ratio_val := graceful_exit_transfer_queue_durability_ratio.value()
	__queued_at_val := __now.UTC()
	__requested_at_val := optional.RequestedAt.value()
	__last_failed_at_val := optional.LastFailedAt.value()
	__last_failed_code_val := optional.LastFailedCode.value()
	__failed_count_val := optional.FailedCount.value()
	__finished_at_val := optional.FinishedAt.value()
	__order_limit_send_count_val := graceful_exit_transfer_queue_order_limit_send_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO graceful_exit_transfer_queue ( node_id, path, piece_num, root_piece_id, durability_ratio, queued_at, requested_at, last_failed_at, last_failed_code, failed_count, finished_at, order_limit_send_count ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __path_val, __piece_num_val, __root_piece_id_val, __durability_ratio_val, __queued_at_val, __requested_at_val, __last_failed_at_val, __last_failed_code_val, __failed_count_val, __finished_at_val, __order_limit_send_count_val)

	// No RETURNING clause: Exec is sufficient since nothing is read back.
	_, err = obj.driver.Exec(__stmt, __node_id_val, __path_val, __piece_num_val, __root_piece_id_val, __durability_ratio_val, __queued_at_val, __requested_at_val, __last_failed_at_val, __last_failed_code_val, __failed_count_val, __finished_at_val, __order_limit_send_count_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// Create_NodesOfflineTime inserts a nodes_offline_times row and returns the
// stored row as read back via the INSERT ... RETURNING clause.
//
// Unlike the timestamped creates, this table has no created_at column, so the
// clock hook is not consulted.
func (obj *postgresImpl) Create_NodesOfflineTime(ctx context.Context,
	nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
	nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
	nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
	nodes_offline_time *NodesOfflineTime, err error) {
	defer mon.Task()(&ctx)(&err)
	__node_id_val := nodes_offline_time_node_id.value()
	__tracked_at_val := nodes_offline_time_tracked_at.value()
	__seconds_val := nodes_offline_time_seconds.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes_offline_times ( node_id, tracked_at, seconds ) VALUES ( ?, ?, ? ) RETURNING nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __tracked_at_val, __seconds_val)

	// Scan the RETURNING columns straight into the result struct.
	nodes_offline_time = &NodesOfflineTime{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __tracked_at_val, __seconds_val).Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return nodes_offline_time, nil

}
|
|
|
|
// Create_StripeCustomer inserts a stripe_customers row and returns the stored
// row as read back via the INSERT ... RETURNING clause.
//
// created_at is set from the db clock hook (UTC).
func (obj *postgresImpl) Create_StripeCustomer(ctx context.Context,
	stripe_customer_user_id StripeCustomer_UserId_Field,
	stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
	stripe_customer *StripeCustomer, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__user_id_val := stripe_customer_user_id.value()
	__customer_id_val := stripe_customer_customer_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripe_customers ( user_id, customer_id, created_at ) VALUES ( ?, ?, ? ) RETURNING stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __user_id_val, __customer_id_val, __created_at_val)

	// Scan the RETURNING columns straight into the result struct.
	stripe_customer = &StripeCustomer{}
	err = obj.driver.QueryRow(__stmt, __user_id_val, __customer_id_val, __created_at_val).Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripe_customer, nil

}
|
|
|
|
// Create_CoinpaymentsTransaction inserts a coinpayments_transactions row and
// returns the stored row as read back via the INSERT ... RETURNING clause.
//
// created_at is set from the db clock hook (UTC); all other columns come from
// the caller-supplied field wrappers.
func (obj *postgresImpl) Create_CoinpaymentsTransaction(ctx context.Context,
	coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
	coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
	coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
	coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
	coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
	coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
	coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
	coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
	coinpayments_transaction *CoinpaymentsTransaction, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := coinpayments_transaction_id.value()
	__user_id_val := coinpayments_transaction_user_id.value()
	__address_val := coinpayments_transaction_address.value()
	__amount_val := coinpayments_transaction_amount.value()
	__received_val := coinpayments_transaction_received.value()
	__status_val := coinpayments_transaction_status.value()
	__key_val := coinpayments_transaction_key.value()
	__timeout_val := coinpayments_transaction_timeout.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coinpayments_transactions ( id, user_id, address, amount, received, status, key, timeout, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __timeout_val, __created_at_val)

	// Scan the RETURNING columns straight into the result struct.
	coinpayments_transaction = &CoinpaymentsTransaction{}
	err = obj.driver.QueryRow(__stmt, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __timeout_val, __created_at_val).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coinpayments_transaction, nil

}
|
|
|
|
// Create_StripecoinpaymentsApplyBalanceIntent inserts a
// stripecoinpayments_apply_balance_intents row and returns the stored row as
// read back via the INSERT ... RETURNING clause.
//
// created_at is set from the db clock hook (UTC).
func (obj *postgresImpl) Create_StripecoinpaymentsApplyBalanceIntent(ctx context.Context,
	stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
	stripecoinpayments_apply_balance_intent_state StripecoinpaymentsApplyBalanceIntent_State_Field) (
	stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__tx_id_val := stripecoinpayments_apply_balance_intent_tx_id.value()
	__state_val := stripecoinpayments_apply_balance_intent_state.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_apply_balance_intents ( tx_id, state, created_at ) VALUES ( ?, ?, ? ) RETURNING stripecoinpayments_apply_balance_intents.tx_id, stripecoinpayments_apply_balance_intents.state, stripecoinpayments_apply_balance_intents.created_at")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __tx_id_val, __state_val, __created_at_val)

	// Scan the RETURNING columns straight into the result struct.
	stripecoinpayments_apply_balance_intent = &StripecoinpaymentsApplyBalanceIntent{}
	err = obj.driver.QueryRow(__stmt, __tx_id_val, __state_val, __created_at_val).Scan(&stripecoinpayments_apply_balance_intent.TxId, &stripecoinpayments_apply_balance_intent.State, &stripecoinpayments_apply_balance_intent.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_apply_balance_intent, nil

}
|
|
|
|
// Create_StripecoinpaymentsInvoiceProjectRecord inserts a
// stripecoinpayments_invoice_project_records row and returns the stored row as
// read back via the INSERT ... RETURNING clause.
//
// created_at is set from the db clock hook (UTC); all other columns come from
// the caller-supplied field wrappers.
func (obj *postgresImpl) Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
	stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
	stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
	stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
	stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
	stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
	stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
	stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
	stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := stripecoinpayments_invoice_project_record_id.value()
	__project_id_val := stripecoinpayments_invoice_project_record_project_id.value()
	__storage_val := stripecoinpayments_invoice_project_record_storage.value()
	__egress_val := stripecoinpayments_invoice_project_record_egress.value()
	__objects_val := stripecoinpayments_invoice_project_record_objects.value()
	__period_start_val := stripecoinpayments_invoice_project_record_period_start.value()
	__period_end_val := stripecoinpayments_invoice_project_record_period_end.value()
	__state_val := stripecoinpayments_invoice_project_record_state.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_invoice_project_records ( id, project_id, storage, egress, objects, period_start, period_end, state, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __project_id_val, __storage_val, __egress_val, __objects_val, __period_start_val, __period_end_val, __state_val, __created_at_val)

	// Scan the RETURNING columns straight into the result struct.
	stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
	err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __storage_val, __egress_val, __objects_val, __period_start_val, __period_end_val, __state_val, __created_at_val).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_invoice_project_record, nil

}
|
|
|
|
// Create_StripecoinpaymentsTxConversionRate inserts a
// stripecoinpayments_tx_conversion_rates row and returns the stored row as
// read back via the INSERT ... RETURNING clause.
//
// created_at is set from the db clock hook (UTC).
func (obj *postgresImpl) Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
	stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
	stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
	stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__tx_id_val := stripecoinpayments_tx_conversion_rate_tx_id.value()
	__rate_val := stripecoinpayments_tx_conversion_rate_rate.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_tx_conversion_rates ( tx_id, rate, created_at ) VALUES ( ?, ?, ? ) RETURNING stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __tx_id_val, __rate_val, __created_at_val)

	// Scan the RETURNING columns straight into the result struct.
	stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
	err = obj.driver.QueryRow(__stmt, __tx_id_val, __rate_val, __created_at_val).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_tx_conversion_rate, nil

}
|
|
|
|
// Create_Coupon inserts a coupons row and returns the stored row as read back
// via the INSERT ... RETURNING clause.
//
// created_at is set from the db clock hook (UTC); all other columns come from
// the caller-supplied field wrappers.
func (obj *postgresImpl) Create_Coupon(ctx context.Context,
	coupon_id Coupon_Id_Field,
	coupon_project_id Coupon_ProjectId_Field,
	coupon_user_id Coupon_UserId_Field,
	coupon_amount Coupon_Amount_Field,
	coupon_description Coupon_Description_Field,
	coupon_type Coupon_Type_Field,
	coupon_status Coupon_Status_Field,
	coupon_duration Coupon_Duration_Field) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the insert timestamp once from the test-overridable clock hook.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := coupon_id.value()
	__project_id_val := coupon_project_id.value()
	__user_id_val := coupon_user_id.value()
	__amount_val := coupon_amount.value()
	__description_val := coupon_description.value()
	__type_val := coupon_type.value()
	__status_val := coupon_status.value()
	__duration_val := coupon_duration.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupons ( id, project_id, user_id, amount, description, type, status, duration, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __project_id_val, __user_id_val, __amount_val, __description_val, __type_val, __status_val, __duration_val, __created_at_val)

	// Scan the RETURNING columns straight into the result struct.
	coupon = &Coupon{}
	err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __user_id_val, __amount_val, __description_val, __type_val, __status_val, __duration_val, __created_at_val).Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon, nil

}
|
|
|
|
// Create_CouponUsage inserts a coupon_usages row and returns the stored row as
// read back via the INSERT ... RETURNING clause.
//
// This table has no created_at column, so the clock hook is not consulted.
func (obj *postgresImpl) Create_CouponUsage(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_amount CouponUsage_Amount_Field,
	coupon_usage_status CouponUsage_Status_Field,
	coupon_usage_period CouponUsage_Period_Field) (
	coupon_usage *CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	__coupon_id_val := coupon_usage_coupon_id.value()
	__amount_val := coupon_usage_amount.value()
	__status_val := coupon_usage_status.value()
	__period_val := coupon_usage_period.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupon_usages ( coupon_id, amount, status, period ) VALUES ( ?, ?, ?, ? ) RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")

	// Render the dialect-specific SQL and log it with its arguments.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __coupon_id_val, __amount_val, __status_val, __period_val)

	// Scan the RETURNING columns straight into the result struct.
	coupon_usage = &CouponUsage{}
	err = obj.driver.QueryRow(__stmt, __coupon_id_val, __amount_val, __status_val, __period_val).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_usage, nil

}
|
|
|
|
// Get_ValueAttribution_By_ProjectId_And_BucketName fetches the single
// value_attributions row keyed by (project_id, bucket_name).
//
// A missing row surfaces as a wrapped sql.ErrNoRows (ErrorCode_NoRows) from
// obj.makeErr; the row pointer is nil on any error.
func (obj *postgresImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
	value_attribution *ValueAttribution, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")

	// Bind the WHERE-clause arguments in placeholder order.
	var __values []interface{}
	__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// QueryRow + Scan: exactly one row is expected for this primary key.
	value_attribution = &ValueAttribution{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
	if err != nil {
		return (*ValueAttribution)(nil), obj.makeErr(err)
	}
	return value_attribution, nil

}
|
|
|
|
// Get_PendingAudits_By_NodeId fetches the single pending_audits row keyed by
// node_id.
//
// A missing row surfaces as a wrapped sql.ErrNoRows (ErrorCode_NoRows) from
// obj.makeErr; the row pointer is nil on any error.
func (obj *postgresImpl) Get_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	pending_audits *PendingAudits, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE pending_audits.node_id = ?")

	// Bind the WHERE-clause argument.
	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// QueryRow + Scan: exactly one row is expected for this key.
	pending_audits = &PendingAudits{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
	if err != nil {
		return (*PendingAudits)(nil), obj.makeErr(err)
	}
	return pending_audits, nil

}
|
|
|
|
// Get_Irreparabledb_By_Segmentpath fetches the single irreparabledbs row keyed
// by segmentpath.
//
// A missing row surfaces as a wrapped sql.ErrNoRows (ErrorCode_NoRows) from
// obj.makeErr; the row pointer is nil on any error.
func (obj *postgresImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	// Bind the WHERE-clause argument.
	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// QueryRow + Scan: exactly one row is expected for this key.
	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return (*Irreparabledb)(nil), obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath returns
// a page of irreparabledbs rows whose segmentpath is strictly greater than the
// given value, ordered ascending by segmentpath, bounded by LIMIT/OFFSET.
//
// An empty page yields a nil slice and nil error; any scan or iteration error
// is wrapped via obj.makeErr.
func (obj *postgresImpl) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
	limit int, offset int64) (
	rows []*Irreparabledb, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath > ? ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?")

	// WHERE-clause argument first, then the LIMIT/OFFSET placeholders.
	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath_greater.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// Ensure the result set is released even on early return.
	defer __rows.Close()

	// Scan each row into a freshly allocated struct and collect them.
	for __rows.Next() {
		irreparabledb := &Irreparabledb{}
		err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, irreparabledb)
	}
	// Surface any error that terminated the iteration.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_AccountingTimestamps_Value_By_Name fetches the value column of the
// accounting_timestamps row with the given name.
//
// Unlike the Get_* accessors, "Find" semantics treat a missing row as a
// non-error: sql.ErrNoRows is swallowed and (nil, nil) is returned. Any other
// error is wrapped via obj.makeErr.
func (obj *postgresImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field) (
	row *Value_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")

	// Bind the WHERE-clause argument.
	var __values []interface{}
	__values = append(__values, accounting_timestamps_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Value_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value)
	// Missing row is not an error for Find: return (nil, nil).
	if err == sql.ErrNoRows {
		return (*Value_Row)(nil), nil
	}
	if err != nil {
		return (*Value_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_AccountingRollup_By_Id fetches the single accounting_rollups row keyed
// by id.
//
// A missing row surfaces as a wrapped sql.ErrNoRows (ErrorCode_NoRows) from
// obj.makeErr; the row pointer is nil on any error.
func (obj *postgresImpl) Get_AccountingRollup_By_Id(ctx context.Context,
	accounting_rollup_id AccountingRollup_Id_Field) (
	accounting_rollup *AccountingRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?")

	// Bind the WHERE-clause argument.
	var __values []interface{}
	__values = append(__values, accounting_rollup_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// QueryRow + Scan: exactly one row is expected for this key.
	accounting_rollup = &AccountingRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
	if err != nil {
		return (*AccountingRollup)(nil), obj.makeErr(err)
	}
	return accounting_rollup, nil

}
|
|
|
|
// All_AccountingRollup_By_StartTime_GreaterOrEqual returns every
// accounting_rollups row whose start_time is >= the given value, in whatever
// order the database yields them (no ORDER BY).
//
// An empty result yields a nil slice and nil error; any query, scan, or
// iteration error is wrapped via obj.makeErr.
func (obj *postgresImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
	accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
	rows []*AccountingRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")

	// Bind the WHERE-clause argument.
	var __values []interface{}
	__values = append(__values, accounting_rollup_start_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// Ensure the result set is released even on early return.
	defer __rows.Close()

	// Scan each row into a freshly allocated struct and collect them.
	for __rows.Next() {
		accounting_rollup := &AccountingRollup{}
		err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, accounting_rollup)
	}
	// Surface any error that terminated the iteration.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Node_By_Id fetches the single nodes row keyed by id, scanning all node
// columns (identity, capacity, version, reputation, contact, and graceful-exit
// fields) into a Node struct.
//
// A missing row surfaces as a wrapped sql.ErrNoRows (ErrorCode_NoRows) from
// obj.makeErr; the row pointer is nil on any error.
func (obj *postgresImpl) Get_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")

	// Bind the WHERE-clause argument.
	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// Scan order must match the SELECT column order exactly.
	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
	if err != nil {
		return (*Node)(nil), obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// All_Node_Id returns the id of every row in nodes, fully materialized
// into memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; driver.Query is called without the
// context, so cancellation is not honored by the query itself.
func (obj *postgresImpl) All_Node_Id(ctx context.Context) (
	rows []*Id_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes")

	var __values []interface{}
	// No-op append retained by the dbx code generator for zero-argument queries.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Row{}
		err = __rows.Scan(&row.Id)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	// Surface any iteration error that terminated the loop early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id returns a LIMIT/OFFSET
// page of full nodes rows with id >= node_id_greater_or_equal, ordered by
// ascending id. An empty page yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context.Context,
	node_id_greater_or_equal Node_Id_Field,
	limit int, offset int64) (
	rows []*Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, node_id_greater_or_equal.value())

	// limit/offset bind the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		node := &Node{}
		err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, node)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id
// returns a LIMIT/OFFSET page of (id, last_net, address, protocol)
// projections over non-disqualified nodes with id >= the given bound,
// ordered by ascending id. An empty page yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
	node_id_greater_or_equal Node_Id_Field,
	limit int, offset int64) (
	rows []*Id_LastNet_Address_Protocol_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, node_id_greater_or_equal.value())

	// limit/offset bind the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_LastNet_Address_Protocol_Row{}
		err = __rows.Scan(&row.Id, &row.LastNet, &row.Address, &row.Protocol)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number returns (id,
// piece_count) for every node whose piece_count is non-zero, fully
// materialized into memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
	rows []*Id_PieceCount_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.piece_count FROM nodes WHERE nodes.piece_count != 0")

	var __values []interface{}
	// No-op append retained by the dbx code generator for zero-argument queries.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_PieceCount_Row{}
		err = __rows.Scan(&row.Id, &row.PieceCount)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure
// returns a LIMIT/OFFSET page of contact info for non-disqualified nodes
// whose most recent contact attempt failed (last_contact_success <
// last_contact_failure), ordered by ascending last_contact_failure.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
	limit int, offset int64) (
	rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_failure LIMIT ? OFFSET ?")

	var __values []interface{}
	// No-op append retained by the dbx code generator; only limit/offset bind.
	__values = append(__values)

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Address_LastContactSuccess_LastContactFailure_Row{}
		err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess
// returns contact info for non-disqualified nodes last successfully
// contacted before the given cutoff and whose last attempt succeeded
// (last_contact_success > last_contact_failure), ordered by ascending
// last_contact_success. The full result set is materialized in memory.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
	node_last_contact_success_less Node_LastContactSuccess_Field) (
	rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < ? AND nodes.last_contact_success > nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_success")

	var __values []interface{}
	__values = append(__values, node_last_contact_success_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Address_LastContactSuccess_LastContactFailure_Row{}
		err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_User_By_NormalizedEmail_And_Status_Not_Number fetches the single
// active (status != 0) user with the given normalized email. The query uses
// LIMIT 2 so ambiguity can be detected: zero rows yields a wrapped
// sql.ErrNoRows, two rows yields a tooManyRows error.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
	user_normalized_email User_NormalizedEmail_Field) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")

	var __values []interface{}
	__values = append(__values, user_normalized_email.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" from an iteration error.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, makeErr(sql.ErrNoRows)
	}

	user = &User{}
	err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the lookup was not unique.
	if __rows.Next() {
		return nil, tooManyRows("User_By_NormalizedEmail_And_Status_Not_Number")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return user, nil

}
|
|
|
|
// Get_User_By_Id fetches the single users row whose primary key equals
// user_id. A missing row surfaces as a wrapped sql.ErrNoRows.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
	if err != nil {
		return (*User)(nil), obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// Get_Project_By_Id fetches the single projects row whose primary key
// equals project_id. A missing row surfaces as a wrapped sql.ErrNoRows.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	if err != nil {
		return (*Project)(nil), obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// Get_Project_UsageLimit_By_Id fetches only the usage_limit column of the
// projects row with the given id. A missing row surfaces as a wrapped
// sql.ErrNoRows.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_Project_UsageLimit_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	row *UsageLimit_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.usage_limit FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &UsageLimit_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.UsageLimit)
	if err != nil {
		return (*UsageLimit_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// All_Project returns every row in projects, fully materialized into
// memory. No rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_Project(ctx context.Context) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects")

	var __values []interface{}
	// No-op append retained by the dbx code generator for zero-argument queries.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt returns every project
// created strictly before the given cutoff, ordered by ascending created_at
// and fully materialized in memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_created_at_less Project_CreatedAt_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")

	var __values []interface{}
	__values = append(__values, project_created_at_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Project_By_OwnerId_OrderBy_Asc_CreatedAt returns every project owned
// by the given owner id, ordered by ascending created_at and fully
// materialized in memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_owner_id Project_OwnerId_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")

	var __values []interface{}
	__values = append(__values, project_owner_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name returns
// every project the given member belongs to (via a JOIN through
// project_members), ordered by ascending project name and fully
// materialized in memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt returns a
// LIMIT/OFFSET page of projects created strictly before the given cutoff,
// ordered by ascending created_at. An empty page yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_created_at_less Project_CreatedAt_Field,
	limit int, offset int64) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, project_created_at_less.value())

	// limit/offset bind the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate fetches the single
// invoice stamp for the given (project_id, start_date) pair. The query uses
// LIMIT 2 so ambiguity can be detected: zero rows yields a wrapped
// sql.ErrNoRows, two rows yields a tooManyRows error.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx context.Context,
	project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
	project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field) (
	project_invoice_stamp *ProjectInvoiceStamp, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE project_invoice_stamps.project_id = ? AND project_invoice_stamps.start_date = ? LIMIT 2")

	var __values []interface{}
	__values = append(__values, project_invoice_stamp_project_id.value(), project_invoice_stamp_start_date.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" from an iteration error.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, makeErr(sql.ErrNoRows)
	}

	project_invoice_stamp = &ProjectInvoiceStamp{}
	err = __rows.Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the lookup was not unique.
	if __rows.Next() {
		return nil, tooManyRows("ProjectInvoiceStamp_By_ProjectId_And_StartDate")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return project_invoice_stamp, nil

}
|
|
|
|
// All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate returns every
// invoice stamp for the given project, newest start_date first, fully
// materialized in memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate(ctx context.Context,
	project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field) (
	rows []*ProjectInvoiceStamp, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE project_invoice_stamps.project_id = ? ORDER BY project_invoice_stamps.start_date DESC")

	var __values []interface{}
	__values = append(__values, project_invoice_stamp_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_invoice_stamp := &ProjectInvoiceStamp{}
		err = __rows.Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_invoice_stamp)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_ProjectMember_By_MemberId returns every project_members row for the
// given member id, fully materialized in memory. No matching rows yields
// (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_ProjectMember_By_MemberId(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field) (
	rows []*ProjectMember, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_member := &ProjectMember{}
		err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_member)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_ProjectMember_By_ProjectId returns a LIMIT/OFFSET page of
// project_members rows for the given project id. No ORDER BY is applied, so
// page ordering is database-determined. An empty page yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Limited_ProjectMember_By_ProjectId(ctx context.Context,
	project_member_project_id ProjectMember_ProjectId_Field,
	limit int, offset int64) (
	rows []*ProjectMember, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.project_id = ? LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, project_member_project_id.value())

	// limit/offset bind the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_member := &ProjectMember{}
		err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_member)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_ApiKey_By_Id fetches the single api_keys row whose primary key equals
// api_key_id. A missing row surfaces as a wrapped sql.ErrNoRows.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")

	var __values []interface{}
	__values = append(__values, api_key_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return (*ApiKey)(nil), obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// Get_ApiKey_By_Head fetches the single api_keys row with the given head
// value. A missing row surfaces as a wrapped sql.ErrNoRows.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_ApiKey_By_Head(ctx context.Context,
	api_key_head ApiKey_Head_Field) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?")

	var __values []interface{}
	__values = append(__values, api_key_head.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return (*ApiKey)(nil), obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// Get_ApiKey_By_Name_And_ProjectId fetches the single api_keys row matching
// the given (name, project_id) pair. A missing row surfaces as a wrapped
// sql.ErrNoRows.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
	api_key_name ApiKey_Name_Field,
	api_key_project_id ApiKey_ProjectId_Field) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.name = ? AND api_keys.project_id = ?")

	var __values []interface{}
	__values = append(__values, api_key_name.value(), api_key_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return (*ApiKey)(nil), obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// All_ApiKey_By_ProjectId_OrderBy_Asc_Name returns every API key belonging
// to the given project, ordered by ascending name and fully materialized in
// memory. No matching rows yields (nil, nil).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	api_key_project_id ApiKey_ProjectId_Field) (
	rows []*ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name")

	var __values []interface{}
	__values = append(__values, api_key_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		api_key := &ApiKey{}
		err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, api_key)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_SerialNumber_By_SerialNumber fetches the single serial_numbers row
// with the given serial number. Unlike the Get_ variants, a missing row
// returns (nil, nil) rather than an error; the LIMIT 2 query still detects
// ambiguity and returns a tooManyRows error if two rows match.
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
	serial_number_serial_number SerialNumber_SerialNumber_Field) (
	serial_number *SerialNumber, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE serial_numbers.serial_number = ? LIMIT 2")

	var __values []interface{}
	__values = append(__values, serial_number_serial_number.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" (not an error for Find_) from an iteration error.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, nil
	}

	serial_number = &SerialNumber{}
	err = __rows.Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the lookup was not unique.
	if __rows.Next() {
		return nil, tooManyRows("SerialNumber_By_SerialNumber")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return serial_number, nil

}
|
|
|
|
// Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action
// fetches the single bucket_bandwidth_rollups row keyed by (bucket_name,
// project_id, interval_start, action). Like other Find_ variants, a missing
// row returns (nil, nil) rather than an error (sql.ErrNoRows is translated).
// NOTE(review): ctx feeds only mon.Task; the query does not honor cancellation.
func (obj *postgresImpl) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
	bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
	bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
	bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
	bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
	bucket_bandwidth_rollup *BucketBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_name = ? AND bucket_bandwidth_rollups.project_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_bucket_name.value(), bucket_bandwidth_rollup_project_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_bandwidth_rollup = &BucketBandwidthRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled)
	// Find_ semantics: absence is not an error.
	if err == sql.ErrNoRows {
		return (*BucketBandwidthRollup)(nil), nil
	}
	if err != nil {
		return (*BucketBandwidthRollup)(nil), obj.makeErr(err)
	}
	return bucket_bandwidth_rollup, nil

}
|
|
|
|
// First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart returns
// the most recent bucket_storage_tallies row for a project (latest
// interval_start first, LIMIT 1). Returns (nil, nil) when the project has
// no tallies.
func (obj *postgresImpl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
	bucket_storage_tally *BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? ORDER BY bucket_storage_tallies.interval_start DESC LIMIT 1 OFFSET 0")

	var __values []interface{}
	__values = append(__values, bucket_storage_tally_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" (nil, nil) from an iteration failure.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, nil
	}

	bucket_storage_tally = &BucketStorageTally{}
	err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	return bucket_storage_tally, nil

}
|
|
|
|
func (obj *postgresImpl) All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_storage_tally := &BucketStorageTally{}
|
|
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bucket_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
// All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
// returns a bucket's storage tallies within an inclusive [start, end]
// interval window, newest first.
func (obj *postgresImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
	bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
	bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")

	// Bind values in the same order as the placeholders above.
	var __values []interface{}
	__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_storage_tally := &BucketStorageTally{}
		err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_storage_tally)
	}
	// Check for an error that terminated iteration early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action
// looks up one storagenode_bandwidth_rollups row by
// (storagenode_id, interval_start, action). "Find" semantics: returns
// (nil, nil) when no row matches, rather than an error.
func (obj *postgresImpl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
	storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field) (
	storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start = ? AND storagenode_bandwidth_rollups.action = ?")

	var __values []interface{}
	__values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start.value(), storagenode_bandwidth_rollup_action.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	storagenode_bandwidth_rollup = &StoragenodeBandwidthRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
	if err == sql.ErrNoRows {
		// Absence is not an error for Find_* accessors.
		return (*StoragenodeBandwidthRollup)(nil), nil
	}
	if err != nil {
		return (*StoragenodeBandwidthRollup)(nil), obj.makeErr(err)
	}
	return storagenode_bandwidth_rollup, nil

}
|
|
|
|
// All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual returns
// all storagenode bandwidth rollups whose interval_start is at or after
// the given time, in no guaranteed order.
func (obj *postgresImpl) All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field) (
	rows []*StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ?")

	var __values []interface{}
	__values = append(__values, storagenode_bandwidth_rollup_interval_start_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
		err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, storagenode_bandwidth_rollup)
	}
	// Check for an error that terminated iteration early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_StoragenodeStorageTally_By_Id fetches a single storagenode storage
// tally by primary key. Unlike the Find_* accessors, a missing row is an
// error (sql.ErrNoRows wrapped by makeErr).
func (obj *postgresImpl) Get_StoragenodeStorageTally_By_Id(ctx context.Context,
	storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
	storagenode_storage_tally *StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.id = ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	storagenode_storage_tally = &StoragenodeStorageTally{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
	if err != nil {
		return (*StoragenodeStorageTally)(nil), obj.makeErr(err)
	}
	return storagenode_storage_tally, nil

}
|
|
|
|
func (obj *postgresImpl) All_StoragenodeStorageTally(ctx context.Context) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_storage_tally := &StoragenodeStorageTally{}
|
|
err = __rows.Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, storagenode_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
// All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual returns all
// storagenode storage tallies whose interval_end_time is at or after the
// given time, in no guaranteed order.
func (obj *postgresImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
	storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
	rows []*StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.interval_end_time >= ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_interval_end_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		storagenode_storage_tally := &StoragenodeStorageTally{}
		err = __rows.Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, storagenode_storage_tally)
	}
	// Check for an error that terminated iteration early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_PeerIdentity_By_NodeId fetches a peer identity record by node id.
// A missing row is returned as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	peer_identity *PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return (*PeerIdentity)(nil), obj.makeErr(err)
	}
	return peer_identity, nil

}
|
|
|
|
// Get_PeerIdentity_LeafSerialNumber_By_NodeId fetches only the
// leaf_serial_number column for a peer identity, avoiding a full-row read.
// A missing row is returned as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	row *LeafSerialNumber_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &LeafSerialNumber_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.LeafSerialNumber)
	if err != nil {
		return (*LeafSerialNumber_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_RegistrationToken_By_Secret fetches a registration token by its
// secret (the table's key column). A missing row is returned as a wrapped
// sql.ErrNoRows.
func (obj *postgresImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, registration_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return (*RegistrationToken)(nil), obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
func (obj *postgresImpl) Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}}
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
if !registration_token_owner_id.isnull() {
|
|
__cond_0.Null = false
|
|
__values = append(__values, registration_token_owner_id.value())
|
|
}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return (*RegistrationToken)(nil), obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
// Get_ResetPasswordToken_By_Secret fetches a password-reset token by its
// secret. A missing row is returned as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_ResetPasswordToken_By_Secret(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return (*ResetPasswordToken)(nil), obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
// Get_ResetPasswordToken_By_OwnerId fetches a password-reset token by the
// owning user's id. A missing row is returned as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
	reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.owner_id = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_owner_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return (*ResetPasswordToken)(nil), obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
// Get_Offer_By_Id fetches a marketing offer by primary key.
// A missing row is returned as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Offer_By_Id(ctx context.Context,
	offer_id Offer_Id_Field) (
	offer *Offer, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers WHERE offers.id = ?")

	var __values []interface{}
	__values = append(__values, offer_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	offer = &Offer{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type)
	if err != nil {
		return (*Offer)(nil), obj.makeErr(err)
	}
	return offer, nil

}
|
|
|
|
func (obj *postgresImpl) All_Offer_OrderBy_Asc_Id(ctx context.Context) (
|
|
rows []*Offer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers ORDER BY offers.id")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
offer := &Offer{}
|
|
err = __rows.Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, offer)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
// All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt
// returns a user's unexpired, not-fully-spent credits
// (credits_used_in_cents < credits_earned_in_cents), soonest-expiring first.
func (obj *postgresImpl) All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx context.Context,
	user_credit_user_id UserCredit_UserId_Field,
	user_credit_expires_at_greater UserCredit_ExpiresAt_Field) (
	rows []*UserCredit, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT user_credits.id, user_credits.user_id, user_credits.offer_id, user_credits.referred_by, user_credits.type, user_credits.credits_earned_in_cents, user_credits.credits_used_in_cents, user_credits.expires_at, user_credits.created_at FROM user_credits WHERE user_credits.user_id = ? AND user_credits.expires_at > ? AND user_credits.credits_used_in_cents < user_credits.credits_earned_in_cents ORDER BY user_credits.expires_at")

	var __values []interface{}
	__values = append(__values, user_credit_user_id.value(), user_credit_expires_at_greater.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		user_credit := &UserCredit{}
		err = __rows.Scan(&user_credit.Id, &user_credit.UserId, &user_credit.OfferId, &user_credit.ReferredBy, &user_credit.Type, &user_credit.CreditsEarnedInCents, &user_credit.CreditsUsedInCents, &user_credit.ExpiresAt, &user_credit.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, user_credit)
	}
	// Check for an error that terminated iteration early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
func (obj *postgresImpl) Count_UserCredit_By_ReferredBy(ctx context.Context,
|
|
user_credit_referred_by UserCredit_ReferredBy_Field) (
|
|
count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __cond_0 = &__sqlbundle_Condition{Left: "user_credits.referred_by", Equal: true, Right: "?", Null: true}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT COUNT(*) FROM user_credits WHERE "), __cond_0}}
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
if !user_credit_referred_by.isnull() {
|
|
__cond_0.Null = false
|
|
__values = append(__values, user_credit_referred_by.value())
|
|
}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&count)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
// Get_BucketMetainfo_By_ProjectId_And_Name fetches one bucket's metainfo
// (ciphers, redundancy defaults, etc.) by (project_id, name).
// A missing row is returned as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	bucket_metainfo *BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_metainfo = &BucketMetainfo{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
	if err != nil {
		return (*BucketMetainfo)(nil), obj.makeErr(err)
	}
	return bucket_metainfo, nil

}
|
|
|
|
// Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name
// pages through a project's buckets by name (inclusive lower bound),
// ascending, using SQL LIMIT/OFFSET. Used for inclusive-cursor listing.
func (obj *postgresImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())

	// limit/offset bind to the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_metainfo := &BucketMetainfo{}
		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_metainfo)
	}
	// Check for an error that terminated iteration early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name
// pages through a project's buckets by name with an exclusive lower bound
// (name > cursor), ascending, using SQL LIMIT/OFFSET.
func (obj *postgresImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value())

	// limit/offset bind to the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_metainfo := &BucketMetainfo{}
		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_metainfo)
	}
	// Check for an error that terminated iteration early.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
func (obj *postgresImpl) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_progress.node_id, graceful_exit_progress.bytes_transferred, graceful_exit_progress.pieces_transferred, graceful_exit_progress.pieces_failed, graceful_exit_progress.updated_at FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_progress_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
graceful_exit_progress = &GracefulExitProgress{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&graceful_exit_progress.NodeId, &graceful_exit_progress.BytesTransferred, &graceful_exit_progress.PiecesTransferred, &graceful_exit_progress.PiecesFailed, &graceful_exit_progress.UpdatedAt)
|
|
if err != nil {
|
|
return (*GracefulExitProgress)(nil), obj.makeErr(err)
|
|
}
|
|
return graceful_exit_progress, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_transfer_queue.node_id, graceful_exit_transfer_queue.path, graceful_exit_transfer_queue.piece_num, graceful_exit_transfer_queue.root_piece_id, graceful_exit_transfer_queue.durability_ratio, graceful_exit_transfer_queue.queued_at, graceful_exit_transfer_queue.requested_at, graceful_exit_transfer_queue.last_failed_at, graceful_exit_transfer_queue.last_failed_code, graceful_exit_transfer_queue.failed_count, graceful_exit_transfer_queue.finished_at, graceful_exit_transfer_queue.order_limit_send_count FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
graceful_exit_transfer_queue = &GracefulExitTransferQueue{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&graceful_exit_transfer_queue.NodeId, &graceful_exit_transfer_queue.Path, &graceful_exit_transfer_queue.PieceNum, &graceful_exit_transfer_queue.RootPieceId, &graceful_exit_transfer_queue.DurabilityRatio, &graceful_exit_transfer_queue.QueuedAt, &graceful_exit_transfer_queue.RequestedAt, &graceful_exit_transfer_queue.LastFailedAt, &graceful_exit_transfer_queue.LastFailedCode, &graceful_exit_transfer_queue.FailedCount, &graceful_exit_transfer_queue.FinishedAt, &graceful_exit_transfer_queue.OrderLimitSendCount)
|
|
if err != nil {
|
|
return (*GracefulExitTransferQueue)(nil), obj.makeErr(err)
|
|
}
|
|
return graceful_exit_transfer_queue, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
|
|
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
|
|
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
|
|
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
|
|
rows []*NodesOfflineTime, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds FROM nodes_offline_times WHERE nodes_offline_times.node_id = ? AND nodes_offline_times.tracked_at > ? AND nodes_offline_times.tracked_at <= ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, nodes_offline_time_node_id.value(), nodes_offline_time_tracked_at_greater.value(), nodes_offline_time_tracked_at_less_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
nodes_offline_time := &NodesOfflineTime{}
|
|
err = __rows.Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, nodes_offline_time)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.customer_id FROM stripe_customers WHERE stripe_customers.user_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripe_customer_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &CustomerId_Row{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.CustomerId)
|
|
if err != nil {
|
|
return (*CustomerId_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripeCustomer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at FROM stripe_customers WHERE stripe_customers.created_at <= ? ORDER BY stripe_customers.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripe_customer_created_at_less_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
stripe_customer := &StripeCustomer{}
|
|
err = __rows.Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, stripe_customer)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE coinpayments_transactions.user_id = ? ORDER BY coinpayments_transactions.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coinpayments_transaction_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coinpayments_transaction := &CoinpaymentsTransaction{}
|
|
err = __rows.Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coinpayments_transaction)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE coinpayments_transactions.created_at <= ? AND coinpayments_transactions.status = ? ORDER BY coinpayments_transactions.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coinpayments_transaction_created_at_less_or_equal.value(), coinpayments_transaction_status.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coinpayments_transaction := &CoinpaymentsTransaction{}
|
|
err = __rows.Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coinpayments_transaction)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.project_id = ? AND stripecoinpayments_invoice_project_records.period_start = ? AND stripecoinpayments_invoice_project_records.period_end = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_invoice_project_record_project_id.value(), stripecoinpayments_invoice_project_record_period_start.value(), stripecoinpayments_invoice_project_record_period_end.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return (*StripecoinpaymentsInvoiceProjectRecord)(nil), obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Limited_StripecoinpaymentsInvoiceProjectRecord_By_CreatedAt_LessOrEqual_And_State_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_created_at_less_or_equal StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.created_at <= ? AND stripecoinpayments_invoice_project_records.state = ? ORDER BY stripecoinpayments_invoice_project_records.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_invoice_project_record_created_at_less_or_equal.value(), stripecoinpayments_invoice_project_record_state.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
stripecoinpayments_invoice_project_record := &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = __rows.Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, stripecoinpayments_invoice_project_record)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at FROM stripecoinpayments_tx_conversion_rates WHERE stripecoinpayments_tx_conversion_rates.tx_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_tx_conversion_rate_tx_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
|
|
if err != nil {
|
|
return (*StripecoinpaymentsTxConversionRate)(nil), obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_tx_conversion_rate, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon = &Coupon{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return (*Coupon)(nil), obj.makeErr(err)
|
|
}
|
|
return coupon, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_Coupon_By_ProjectId_And_Status_Equal_Number_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_project_id Coupon_ProjectId_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.project_id = ? AND coupons.status = 0 ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.user_id = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.user_id = ? AND coupons.status = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_user_id.value(), coupon_status.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.status = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_status.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.created_at <= ? AND coupons.status = ? ORDER BY coupons.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_created_at_less_or_equal.value(), coupon_status.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Limited_CouponUsage_By_Period_LessOrEqual_And_Status_Equal_Number_OrderBy_Desc_Period(ctx context.Context,
|
|
coupon_usage_period_less_or_equal CouponUsage_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*CouponUsage, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period FROM coupon_usages WHERE coupon_usages.period <= ? AND coupon_usages.status = 0 ORDER BY coupon_usages.period DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_usage_period_less_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon_usage := &CouponUsage{}
|
|
err = __rows.Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, coupon_usage)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field,
|
|
update PendingAudits_Update_Fields) (
|
|
pending_audits *PendingAudits, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE pending_audits SET "), __sets, __sqlbundle_Literal(" WHERE pending_audits.node_id = ? RETURNING pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.ReverifyCount._set {
|
|
__values = append(__values, update.ReverifyCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("reverify_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, pending_audits_node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
pending_audits = &PendingAudits{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return pending_audits, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Segmentdetail._set {
|
|
__values = append(__values, update.Segmentdetail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
|
|
}
|
|
|
|
if update.PiecesLostCount._set {
|
|
__values = append(__values, update.PiecesLostCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
|
|
}
|
|
|
|
if update.SegDamagedUnixSec._set {
|
|
__values = append(__values, update.SegDamagedUnixSec.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
|
|
}
|
|
|
|
if update.RepairAttemptCount._set {
|
|
__values = append(__values, update.RepairAttemptCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, irreparabledb_segmentpath.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// UpdateNoReturn_AccountingTimestamps_By_Name updates the accounting_timestamps
// row whose name equals accounting_timestamps_name, writing only the fields
// flagged as set in update. It does not read back the updated row.
// Returns emptyUpdate() when no update fields are set.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.Exec without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	update AccountingTimestamps_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// Append one SET fragment (and its value) per field the caller marked set.
	if update.Value._set {
		__values = append(__values, update.Value.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	__args = append(__args, accounting_timestamps_name.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_Node_By_Id updates the nodes row with the given id, writing only the
// fields flagged as set in update, and returns the full updated row via
// RETURNING. updated_at is always set to the current UTC time from
// obj.db.Hooks.Now, so the SET clause is never empty and no emptyUpdate()
// check is needed. Returns (nil, nil) when no row matched.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.QueryRow without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	node *Node, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per optional field the caller marked set.
	if update.Address._set {
		__values = append(__values, update.Address.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
	}

	if update.LastNet._set {
		__values = append(__values, update.LastNet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
	}

	if update.Protocol._set {
		__values = append(__values, update.Protocol.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
	}

	if update.Type._set {
		__values = append(__values, update.Type.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
	}

	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.Wallet._set {
		__values = append(__values, update.Wallet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
	}

	if update.FreeBandwidth._set {
		__values = append(__values, update.FreeBandwidth.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?"))
	}

	if update.FreeDisk._set {
		__values = append(__values, update.FreeDisk.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
	}

	if update.PieceCount._set {
		__values = append(__values, update.PieceCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
	}

	if update.Major._set {
		__values = append(__values, update.Major.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
	}

	if update.Minor._set {
		__values = append(__values, update.Minor.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
	}

	if update.Patch._set {
		__values = append(__values, update.Patch.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
	}

	if update.Hash._set {
		__values = append(__values, update.Hash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
	}

	if update.Timestamp._set {
		__values = append(__values, update.Timestamp.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
	}

	if update.Release._set {
		__values = append(__values, update.Release.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
	}

	if update.Latency90._set {
		__values = append(__values, update.Latency90.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
	}

	if update.AuditSuccessCount._set {
		__values = append(__values, update.AuditSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
	}

	if update.TotalAuditCount._set {
		__values = append(__values, update.TotalAuditCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
	}

	if update.UptimeSuccessCount._set {
		__values = append(__values, update.UptimeSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
	}

	if update.TotalUptimeCount._set {
		__values = append(__values, update.TotalUptimeCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
	}

	if update.LastContactSuccess._set {
		__values = append(__values, update.LastContactSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
	}

	if update.LastContactFailure._set {
		__values = append(__values, update.LastContactFailure.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
	}

	if update.Contained._set {
		__values = append(__values, update.Contained.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
	}

	if update.Disqualified._set {
		__values = append(__values, update.Disqualified.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
	}

	if update.AuditReputationAlpha._set {
		__values = append(__values, update.AuditReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
	}

	if update.AuditReputationBeta._set {
		__values = append(__values, update.AuditReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
	}

	if update.UptimeReputationAlpha._set {
		__values = append(__values, update.UptimeReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_alpha = ?"))
	}

	if update.UptimeReputationBeta._set {
		__values = append(__values, update.UptimeReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_beta = ?"))
	}

	if update.ExitInitiatedAt._set {
		__values = append(__values, update.ExitInitiatedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
	}

	if update.ExitLoopCompletedAt._set {
		__values = append(__values, update.ExitLoopCompletedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
	}

	if update.ExitFinishedAt._set {
		__values = append(__values, update.ExitFinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
	}

	if update.ExitSuccess._set {
		__values = append(__values, update.ExitSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
	}

	// updated_at is maintained automatically on every update, which also
	// guarantees the SET clause is never empty.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
	// No matching row is not an error for this API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil
}
|
|
|
|
// UpdateNoReturn_Node_By_Id updates the nodes row with the given id, writing
// only the fields flagged as set in update, without reading back the updated
// row. updated_at is always set to the current UTC time from obj.db.Hooks.Now,
// so the SET clause is never empty and no emptyUpdate() check is needed.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.Exec without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per optional field the caller marked set.
	if update.Address._set {
		__values = append(__values, update.Address.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
	}

	if update.LastNet._set {
		__values = append(__values, update.LastNet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
	}

	if update.Protocol._set {
		__values = append(__values, update.Protocol.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
	}

	if update.Type._set {
		__values = append(__values, update.Type.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
	}

	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.Wallet._set {
		__values = append(__values, update.Wallet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
	}

	if update.FreeBandwidth._set {
		__values = append(__values, update.FreeBandwidth.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?"))
	}

	if update.FreeDisk._set {
		__values = append(__values, update.FreeDisk.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
	}

	if update.PieceCount._set {
		__values = append(__values, update.PieceCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
	}

	if update.Major._set {
		__values = append(__values, update.Major.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
	}

	if update.Minor._set {
		__values = append(__values, update.Minor.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
	}

	if update.Patch._set {
		__values = append(__values, update.Patch.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
	}

	if update.Hash._set {
		__values = append(__values, update.Hash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
	}

	if update.Timestamp._set {
		__values = append(__values, update.Timestamp.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
	}

	if update.Release._set {
		__values = append(__values, update.Release.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
	}

	if update.Latency90._set {
		__values = append(__values, update.Latency90.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
	}

	if update.AuditSuccessCount._set {
		__values = append(__values, update.AuditSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
	}

	if update.TotalAuditCount._set {
		__values = append(__values, update.TotalAuditCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
	}

	if update.UptimeSuccessCount._set {
		__values = append(__values, update.UptimeSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
	}

	if update.TotalUptimeCount._set {
		__values = append(__values, update.TotalUptimeCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
	}

	if update.LastContactSuccess._set {
		__values = append(__values, update.LastContactSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
	}

	if update.LastContactFailure._set {
		__values = append(__values, update.LastContactFailure.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
	}

	if update.Contained._set {
		__values = append(__values, update.Contained.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
	}

	if update.Disqualified._set {
		__values = append(__values, update.Disqualified.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
	}

	if update.AuditReputationAlpha._set {
		__values = append(__values, update.AuditReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
	}

	if update.AuditReputationBeta._set {
		__values = append(__values, update.AuditReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
	}

	if update.UptimeReputationAlpha._set {
		__values = append(__values, update.UptimeReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_alpha = ?"))
	}

	if update.UptimeReputationBeta._set {
		__values = append(__values, update.UptimeReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_beta = ?"))
	}

	if update.ExitInitiatedAt._set {
		__values = append(__values, update.ExitInitiatedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
	}

	if update.ExitLoopCompletedAt._set {
		__values = append(__values, update.ExitLoopCompletedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
	}

	if update.ExitFinishedAt._set {
		__values = append(__values, update.ExitFinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
	}

	if update.ExitSuccess._set {
		__values = append(__values, update.ExitSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
	}

	// updated_at is maintained automatically on every update, which also
	// guarantees the SET clause is never empty.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_User_By_Id updates the users row with the given id, writing only the
// fields flagged as set in update, and returns the full updated row via
// RETURNING. Returns emptyUpdate() when no update fields are set, and
// (nil, nil) when no row matched.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.QueryRow without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) Update_User_By_Id(ctx context.Context,
	user_id User_Id_Field,
	update User_Update_Fields) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per field the caller marked set.
	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.NormalizedEmail._set {
		__values = append(__values, update.NormalizedEmail.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("normalized_email = ?"))
	}

	if update.FullName._set {
		__values = append(__values, update.FullName.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("full_name = ?"))
	}

	if update.ShortName._set {
		__values = append(__values, update.ShortName.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("short_name = ?"))
	}

	if update.PasswordHash._set {
		__values = append(__values, update.PasswordHash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?"))
	}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, user_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
	// No matching row is not an error for this API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil
}
|
|
|
|
// Update_Project_By_Id updates the projects row with the given id, writing
// only the fields flagged as set in update (description and/or usage_limit),
// and returns the full updated row via RETURNING. Returns emptyUpdate() when
// no update fields are set, and (nil, nil) when no row matched.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.QueryRow without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field,
	update Project_Update_Fields) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per field the caller marked set.
	if update.Description._set {
		__values = append(__values, update.Description.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
	}

	if update.UsageLimit._set {
		__values = append(__values, update.UsageLimit.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, project_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	// No matching row is not an error for this API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil
}
|
|
|
|
// UpdateNoReturn_ApiKey_By_Id updates the api_keys row with the given id,
// writing only the name field when it is flagged as set, without reading back
// the updated row. Returns emptyUpdate() when no update fields are set.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.Exec without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	update ApiKey_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per field the caller marked set.
	if update.Name._set {
		__values = append(__values, update.Name.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	__args = append(__args, api_key_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// UpdateNoReturn_PeerIdentity_By_NodeId updates the peer_identities row with
// the given node_id, writing only the fields flagged as set in update, without
// reading back the updated row. updated_at is always set to the current UTC
// time from obj.db.Hooks.Now, so the SET clause is never empty and no
// emptyUpdate() check is needed.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.Exec without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per optional field the caller marked set.
	if update.LeafSerialNumber._set {
		__values = append(__values, update.LeafSerialNumber.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?"))
	}

	if update.Chain._set {
		__values = append(__values, update.Chain.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?"))
	}

	// updated_at is maintained automatically on every update, which also
	// guarantees the SET clause is never empty.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, peer_identity_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_RegistrationToken_By_Secret updates the registration_tokens row whose
// secret matches registration_token_secret, writing only the owner_id field
// when it is flagged as set, and returns the full updated row via RETURNING.
// Returns emptyUpdate() when no update fields are set, and (nil, nil) when no
// row matched.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.QueryRow without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	update RegistrationToken_Update_Fields) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ? RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per field the caller marked set.
	if update.OwnerId._set {
		__values = append(__values, update.OwnerId.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, registration_token_secret.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	// No matching row is not an error for this API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil
}
|
|
|
|
// UpdateNoReturn_Offer_By_Id updates the offers row with the given id, writing
// only the fields flagged as set in update, without reading back the updated
// row. Returns emptyUpdate() when no update fields are set.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.Exec without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) UpdateNoReturn_Offer_By_Id(ctx context.Context,
	offer_id Offer_Id_Field,
	update Offer_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE offers SET "), __sets, __sqlbundle_Literal(" WHERE offers.id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per field the caller marked set.
	if update.Name._set {
		__values = append(__values, update.Name.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
	}

	if update.Description._set {
		__values = append(__values, update.Description.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
	}

	if update.AwardCreditInCents._set {
		__values = append(__values, update.AwardCreditInCents.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("award_credit_in_cents = ?"))
	}

	if update.InviteeCreditInCents._set {
		__values = append(__values, update.InviteeCreditInCents.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("invitee_credit_in_cents = ?"))
	}

	if update.AwardCreditDurationDays._set {
		__values = append(__values, update.AwardCreditDurationDays.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("award_credit_duration_days = ?"))
	}

	if update.InviteeCreditDurationDays._set {
		__values = append(__values, update.InviteeCreditDurationDays.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("invitee_credit_duration_days = ?"))
	}

	if update.RedeemableCap._set {
		__values = append(__values, update.RedeemableCap.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("redeemable_cap = ?"))
	}

	if update.ExpiresAt._set {
		__values = append(__values, update.ExpiresAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("expires_at = ?"))
	}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	if update.Type._set {
		__values = append(__values, update.Type.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	__args = append(__args, offer_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_BucketMetainfo_By_ProjectId_And_Name updates the bucket_metainfos row
// matching both project_id and name (composite lookup), writing only the
// fields flagged as set in update, and returns the full updated row via
// RETURNING. Returns emptyUpdate() when no update fields are set, and
// (nil, nil) when no row matched.
//
// NOTE(review): ctx is consumed only by monitoring; the query runs via
// obj.driver.QueryRow without the context (context cancellation was reverted
// because of known lib/pq issues — see the revert commit message).
func (obj *postgresImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field,
	update BucketMetainfo_Update_Fields) (
	bucket_metainfo *BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled below with the
	// dynamically assembled "col = ?" list.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	// __values holds all placeholder values: SET values first, WHERE args last,
	// matching the placeholder order in the rendered statement.
	var __values []interface{}
	var __args []interface{}

	// One SET fragment (and its value) per field the caller marked set.
	if update.PartnerId._set {
		__values = append(__values, update.PartnerId.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?"))
	}

	if update.DefaultSegmentSize._set {
		__values = append(__values, update.DefaultSegmentSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?"))
	}

	if update.DefaultEncryptionCipherSuite._set {
		__values = append(__values, update.DefaultEncryptionCipherSuite.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?"))
	}

	if update.DefaultEncryptionBlockSize._set {
		__values = append(__values, update.DefaultEncryptionBlockSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?"))
	}

	if update.DefaultRedundancyAlgorithm._set {
		__values = append(__values, update.DefaultRedundancyAlgorithm.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?"))
	}

	if update.DefaultRedundancyShareSize._set {
		__values = append(__values, update.DefaultRedundancyShareSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?"))
	}

	if update.DefaultRedundancyRequiredShares._set {
		__values = append(__values, update.DefaultRedundancyRequiredShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?"))
	}

	if update.DefaultRedundancyRepairShares._set {
		__values = append(__values, update.DefaultRedundancyRepairShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?"))
	}

	if update.DefaultRedundancyOptimalShares._set {
		__values = append(__values, update.DefaultRedundancyOptimalShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?"))
	}

	if update.DefaultRedundancyTotalShares._set {
		__values = append(__values, update.DefaultRedundancyTotalShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?"))
	}

	// An UPDATE with an empty SET clause is invalid SQL; bail out early.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// Two WHERE args, in the same order as the placeholders in the template.
	__args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	// Render '?' placeholders into the dialect's concrete form and execute.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_metainfo = &BucketMetainfo{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
	// No matching row is not an error for this API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_metainfo, nil
}
|
|
|
|
// UpdateNoReturn_GracefulExitProgress_By_NodeId updates the graceful_exit_progress row
// keyed by node_id, setting only the fields whose _set flag is true in update, and always
// refreshing updated_at from the db clock hook. It does not read back the updated row.
// Unlike the other Update methods in this file there is no emptyUpdate() guard, because
// the unconditional updated_at assignment guarantees at least one SET clause.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) UpdateNoReturn_GracefulExitProgress_By_NodeId(ctx context.Context,
	graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
	update GracefulExitProgress_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole after the optional fields are collected.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_progress SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_progress.node_id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Each optional field contributes a placeholder value and its SET clause only when set.
	if update.BytesTransferred._set {
		__values = append(__values, update.BytesTransferred.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("bytes_transferred = ?"))
	}

	if update.PiecesTransferred._set {
		__values = append(__values, update.PiecesTransferred.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_transferred = ?"))
	}

	if update.PiecesFailed._set {
		__values = append(__values, update.PiecesFailed.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_failed = ?"))
	}

	// updated_at is always bumped via the db clock hook (test-overridable), normalized to UTC.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now.UTC())
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, graceful_exit_progress_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum updates the
// graceful_exit_transfer_queue row identified by the (node_id, path, piece_num) composite
// key, applying only the fields flagged _set in update. Returns emptyUpdate() when no
// field is set; does not read back the updated row.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
	update GracefulExitTransferQueue_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole after the optional fields are collected.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_transfer_queue SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Each optional field contributes a placeholder value and its SET clause only when set.
	if update.DurabilityRatio._set {
		__values = append(__values, update.DurabilityRatio.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("durability_ratio = ?"))
	}

	if update.RequestedAt._set {
		__values = append(__values, update.RequestedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("requested_at = ?"))
	}

	if update.LastFailedAt._set {
		__values = append(__values, update.LastFailedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_at = ?"))
	}

	if update.LastFailedCode._set {
		__values = append(__values, update.LastFailedCode.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_code = ?"))
	}

	if update.FailedCount._set {
		__values = append(__values, update.FailedCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("failed_count = ?"))
	}

	if update.FinishedAt._set {
		__values = append(__values, update.FinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("finished_at = ?"))
	}

	if update.OrderLimitSendCount._set {
		__values = append(__values, update.OrderLimitSendCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("order_limit_send_count = ?"))
	}

	// No SET clauses means nothing to do; surface that to the caller as an error.
	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_CoinpaymentsTransaction_By_Id updates the coinpayments_transactions row with the
// given id, setting only the Received/Status fields flagged _set in update, and returns
// the full updated row via RETURNING. Returns (nil, emptyUpdate()) when no field is set,
// and (nil, nil) when no row matches the id (sql.ErrNoRows is swallowed).
// NOTE(review): ctx is consumed only by the monitoring task; the query runs via
// driver.QueryRow without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
	coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
	update CoinpaymentsTransaction_Update_Fields) (
	coinpayments_transaction *CoinpaymentsTransaction, err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole; RETURNING yields the updated row.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coinpayments_transactions SET "), __sets, __sqlbundle_Literal(" WHERE coinpayments_transactions.id = ? RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Each optional field contributes a placeholder value and its SET clause only when set.
	if update.Received._set {
		__values = append(__values, update.Received.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("received = ?"))
	}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// No SET clauses means nothing to do; surface that to the caller as an error.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, coinpayments_transaction_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coinpayments_transaction = &CoinpaymentsTransaction{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
	// No matching row is not an error for this generated API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coinpayments_transaction, nil
}
|
|
|
|
// Update_StripecoinpaymentsApplyBalanceIntent_By_TxId updates the
// stripecoinpayments_apply_balance_intents row with the given tx_id, setting State when
// flagged _set, and returns the full updated row via RETURNING. Returns
// (nil, emptyUpdate()) when no field is set, and (nil, nil) when no row matches.
// NOTE(review): ctx is consumed only by the monitoring task; the query runs via
// driver.QueryRow without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Update_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
	stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
	update StripecoinpaymentsApplyBalanceIntent_Update_Fields) (
	stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole; RETURNING yields the updated row.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE stripecoinpayments_apply_balance_intents SET "), __sets, __sqlbundle_Literal(" WHERE stripecoinpayments_apply_balance_intents.tx_id = ? RETURNING stripecoinpayments_apply_balance_intents.tx_id, stripecoinpayments_apply_balance_intents.state, stripecoinpayments_apply_balance_intents.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// State is the only updatable column; included only when set.
	if update.State._set {
		__values = append(__values, update.State.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("state = ?"))
	}

	// No SET clauses means nothing to do; surface that to the caller as an error.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, stripecoinpayments_apply_balance_intent_tx_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_apply_balance_intent = &StripecoinpaymentsApplyBalanceIntent{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_apply_balance_intent.TxId, &stripecoinpayments_apply_balance_intent.State, &stripecoinpayments_apply_balance_intent.CreatedAt)
	// No matching row is not an error for this generated API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_apply_balance_intent, nil
}
|
|
|
|
// Update_StripecoinpaymentsInvoiceProjectRecord_By_Id updates the
// stripecoinpayments_invoice_project_records row with the given id, setting State when
// flagged _set, and returns the full updated row via RETURNING. Returns
// (nil, emptyUpdate()) when no field is set, and (nil, nil) when no row matches.
// NOTE(review): ctx is consumed only by the monitoring task; the query runs via
// driver.QueryRow without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
	update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
	stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole; RETURNING yields the updated row.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE stripecoinpayments_invoice_project_records SET "), __sets, __sqlbundle_Literal(" WHERE stripecoinpayments_invoice_project_records.id = ? RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// State is the only updatable column; included only when set.
	if update.State._set {
		__values = append(__values, update.State.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("state = ?"))
	}

	// No SET clauses means nothing to do; surface that to the caller as an error.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, stripecoinpayments_invoice_project_record_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
	// No matching row is not an error for this generated API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_invoice_project_record, nil
}
|
|
|
|
// Update_Coupon_By_Id updates the coupons row with the given id, setting Status when
// flagged _set, and returns the full updated row via RETURNING. Returns
// (nil, emptyUpdate()) when no field is set, and (nil, nil) when no row matches.
// NOTE(review): ctx is consumed only by the monitoring task; the query runs via
// driver.QueryRow without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Update_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field,
	update Coupon_Update_Fields) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole; RETURNING yields the updated row.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupons SET "), __sets, __sqlbundle_Literal(" WHERE coupons.id = ? RETURNING coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Status is the only updatable column; included only when set.
	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// No SET clauses means nothing to do; surface that to the caller as an error.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, coupon_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon = &Coupon{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
	// No matching row is not an error for this generated API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon, nil
}
|
|
|
|
// Update_CouponUsage_By_CouponId_And_Period updates the coupon_usages row identified by
// the (coupon_id, period) composite key, setting Status when flagged _set, and returns
// the full updated row via RETURNING. Returns (nil, emptyUpdate()) when no field is set,
// and (nil, nil) when no row matches.
// NOTE(review): ctx is consumed only by the monitoring task; the query runs via
// driver.QueryRow without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_period CouponUsage_Period_Field,
	update CouponUsage_Update_Fields) (
	coupon_usage *CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	var __sets = &__sqlbundle_Hole{}

	// SET clauses are filled into the __sets hole; RETURNING yields the updated row.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupon_usages SET "), __sets, __sqlbundle_Literal(" WHERE coupon_usages.coupon_id = ? AND coupon_usages.period = ? RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Status is the only updatable column; included only when set.
	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// No SET clauses means nothing to do; surface that to the caller as an error.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments go after all SET values in the final parameter list.
	__args = append(__args, coupon_usage_coupon_id.value(), coupon_usage_period.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon_usage = &CouponUsage{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
	// No matching row is not an error for this generated API: return (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_usage, nil
}
|
|
|
|
// Delete_ValueAttribution_By_ProjectId_And_BucketName deletes the value_attributions row
// matching (project_id, bucket_name). deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")

	var __values []interface{}
	__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_PendingAudits_By_NodeId deletes the pending_audits row matching node_id.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?")

	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Irreparabledb_By_Segmentpath deletes the irreparabledbs row matching segmentpath.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_AccountingRollup_By_Id deletes the accounting_rollups row matching id.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_AccountingRollup_By_Id(ctx context.Context,
	accounting_rollup_id AccountingRollup_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?")

	var __values []interface{}
	__values = append(__values, accounting_rollup_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Node_By_Id deletes the nodes row matching id.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_User_By_Id deletes the users row matching id.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Project_By_Id deletes the projects row matching id.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_ProjectMember_By_MemberId_And_ProjectId deletes the project_members row matching
// (member_id, project_id). deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? AND project_members.project_id = ?")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value(), project_member_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_ApiKey_By_Id deletes the api_keys row matching id.
// deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?")

	var __values []interface{}
	__values = append(__values, api_key_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_SerialNumber_By_ExpiresAt_LessOrEqual bulk-deletes serial_numbers rows whose
// expires_at is at or before the given time, returning the number of rows removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
	serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM serial_numbers WHERE serial_numbers.expires_at <= ?")

	var __values []interface{}
	__values = append(__values, serial_number_expires_at_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	// Unlike the single-row Delete methods, this returns the affected-row count itself.
	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Delete_StoragenodeStorageTally_By_Id deletes the storagenode_storage_tallies row
// matching id. deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_StoragenodeStorageTally_By_Id(ctx context.Context,
	storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.id = ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_ResetPasswordToken_By_Secret deletes the reset_password_tokens row matching
// secret. deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_ResetPasswordToken_By_Secret(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_BucketMetainfo_By_ProjectId_And_Name deletes the bucket_metainfos row matching
// (project_id, name). deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_GracefulExitProgress_By_NodeId deletes the graceful_exit_progress row matching
// node_id. deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_GracefulExitProgress_By_NodeId(ctx context.Context,
	graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_progress_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId bulk-deletes all graceful_exit_transfer_queue
// rows for the given node_id, returning the number of rows removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	// Unlike the single-row Delete methods, this returns the affected-row count itself.
	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum deletes the
// graceful_exit_transfer_queue row identified by the (node_id, path, piece_num) composite
// key. deleted reports whether at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null bulk-deletes the
// already-finished (finished_at IS NOT NULL) graceful_exit_transfer_queue rows for the
// given node_id, returning the number of rows removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.finished_at is not NULL")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	// Unlike the single-row Delete methods, this returns the affected-row count itself.
	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId deletes the
// stripecoinpayments_apply_balance_intents row matching tx_id. deleted reports whether
// at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
	stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM stripecoinpayments_apply_balance_intents WHERE stripecoinpayments_apply_balance_intents.tx_id = ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_apply_balance_intent_tx_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id deletes the
// stripecoinpayments_invoice_project_records row matching id. deleted reports whether
// at least one row was removed.
// NOTE(review): ctx is consumed only by the monitoring task; the statement runs via
// driver.Exec without context propagation (see the revert note re lib/pq cancellation).
func (obj *postgresImpl) Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.id = ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_invoice_project_record_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted is derived from the driver-reported affected-row count.
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Coupon_By_Id deletes the coupons row with the given primary-key id.
// It reports whether a row was actually deleted.
func (obj *postgresImpl) Delete_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM coupons WHERE coupons.id = ?")

	var __values []interface{}
	__values = append(__values, coupon_id.value())

	// Render the placeholder style for this dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	// deleted == true iff at least one row matched the id.
	return __count > 0, nil

}
|
|
|
|
// isConstraintError reports whether err is a PostgreSQL integrity-constraint
// violation and, if so, returns the name of the violated constraint.
//
// SQLSTATE class "23" is "Integrity Constraint Violation" (unique, foreign
// key, not-null, check). Errors from any other class, or errors that are not
// *pq.Error at all, yield ("", false).
//
// NOTE(review): value receiver here vs. pointer receivers on the other
// postgresImpl methods — harmless (no state is mutated) but inconsistent;
// generated code.
func (impl postgresImpl) isConstraintError(err error) (
	constraint string, ok bool) {
	if e, ok := err.(*pq.Error); ok {
		if e.Code.Class() == "23" {
			return e.Constraint, true
		}
	}
	return "", false
}
|
|
|
|
// deleteAll removes every row from every table managed by this schema and
// returns the total number of rows deleted across all tables.
//
// The tables are cleared in a fixed order that appears chosen so referencing
// rows are removed before the rows they reference (e.g. project_members and
// api_keys before projects, coupon_usages before coupons), keeping
// foreign-key constraints satisfied without CASCADE — presumably used for
// test cleanup; confirm against callers.
//
// NOTE(review): ctx is consumed only by the monitoring task; the Exec calls
// are not context-aware.
func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error) {
	defer mon.Task()(&ctx)(&err)
	var __res sql.Result
	var __count int64
	// Each stanza below follows the same pattern: DELETE everything from one
	// table, bail out on error, and accumulate the affected-row count.
	__res, err = obj.driver.Exec("DELETE FROM user_credits;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM used_serials;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM stripecoinpayments_apply_balance_intents;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM project_members;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM project_invoice_stamps;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM bucket_metainfos;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM api_keys;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM value_attributions;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM users;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM stripecoinpayments_tx_conversion_rates;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM stripecoinpayments_invoice_project_records;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM stripe_customers;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM storagenode_storage_tallies;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM storagenode_bandwidth_rollups;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM serial_numbers;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM reset_password_tokens;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM registration_tokens;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM projects;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM pending_audits;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM peer_identities;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM offers;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM nodes_offline_times;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM nodes;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM irreparabledbs;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM injuredsegments;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM graceful_exit_transfer_queue;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM graceful_exit_progress;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM coupon_usages;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM coupons;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM coinpayments_transactions;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM bucket_storage_tallies;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM bucket_bandwidth_rollups;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM accounting_timestamps;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM accounting_rollups;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count

	return count, nil

}
|
|
|
|
// Create_ValueAttribution inserts a new value_attributions row and returns
// the created row as scanned back via RETURNING. last_updated is set to the
// current time from the db clock hook (UTC).
func (obj *cockroachImpl) Create_ValueAttribution(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field,
	value_attribution_partner_id ValueAttribution_PartnerId_Field) (
	value_attribution *ValueAttribution, err error) {
	defer mon.Task()(&ctx)(&err)

	// Timestamps come from the injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__project_id_val := value_attribution_project_id.value()
	__bucket_name_val := value_attribution_bucket_name.value()
	__partner_id_val := value_attribution_partner_id.value()
	__last_updated_val := __now.UTC()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, last_updated ) VALUES ( ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val)

	value_attribution = &ValueAttribution{}
	err = obj.driver.QueryRow(__stmt, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return value_attribution, nil

}
|
|
|
|
// Create_PendingAudits inserts a new pending_audits row and returns the
// created row as scanned back via RETURNING.
func (obj *cockroachImpl) Create_PendingAudits(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	pending_audits_piece_id PendingAudits_PieceId_Field,
	pending_audits_stripe_index PendingAudits_StripeIndex_Field,
	pending_audits_share_size PendingAudits_ShareSize_Field,
	pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field,
	pending_audits_reverify_count PendingAudits_ReverifyCount_Field,
	pending_audits_path PendingAudits_Path_Field) (
	pending_audits *PendingAudits, err error) {
	defer mon.Task()(&ctx)(&err)
	__node_id_val := pending_audits_node_id.value()
	__piece_id_val := pending_audits_piece_id.value()
	__stripe_index_val := pending_audits_stripe_index.value()
	__share_size_val := pending_audits_share_size.value()
	__expected_share_hash_val := pending_audits_expected_share_hash.value()
	__reverify_count_val := pending_audits_reverify_count.value()
	__path_val := pending_audits_path.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO pending_audits ( node_id, piece_id, stripe_index, share_size, expected_share_hash, reverify_count, path ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val, __path_val)

	pending_audits = &PendingAudits{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val, __path_val).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return pending_audits, nil

}
|
|
|
|
// CreateNoReturn_Irreparabledb inserts a new irreparabledbs row without
// scanning the inserted row back (no RETURNING clause).
func (obj *cockroachImpl) CreateNoReturn_Irreparabledb(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
	irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
	irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
	irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__segmentpath_val := irreparabledb_segmentpath.value()
	__segmentdetail_val := irreparabledb_segmentdetail.value()
	__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
	__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
	__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)

	_, err = obj.driver.Exec(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_AccountingTimestamps inserts a new accounting_timestamps
// (name, value) row without scanning the inserted row back.
func (obj *cockroachImpl) CreateNoReturn_AccountingTimestamps(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	accounting_timestamps_value AccountingTimestamps_Value_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__name_val := accounting_timestamps_name.value()
	__value_val := accounting_timestamps_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __name_val, __value_val)

	_, err = obj.driver.Exec(__stmt, __name_val, __value_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_AccountingRollup inserts a new accounting_rollups row
// without scanning the inserted row back.
func (obj *cockroachImpl) CreateNoReturn_AccountingRollup(ctx context.Context,
	accounting_rollup_node_id AccountingRollup_NodeId_Field,
	accounting_rollup_start_time AccountingRollup_StartTime_Field,
	accounting_rollup_put_total AccountingRollup_PutTotal_Field,
	accounting_rollup_get_total AccountingRollup_GetTotal_Field,
	accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
	accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
	accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
	accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__node_id_val := accounting_rollup_node_id.value()
	__start_time_val := accounting_rollup_start_time.value()
	__put_total_val := accounting_rollup_put_total.value()
	__get_total_val := accounting_rollup_get_total.value()
	__get_audit_total_val := accounting_rollup_get_audit_total.value()
	__get_repair_total_val := accounting_rollup_get_repair_total.value()
	__put_repair_total_val := accounting_rollup_put_repair_total.value()
	__at_rest_total_val := accounting_rollup_at_rest_total.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)

	_, err = obj.driver.Exec(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_Node inserts a new nodes row without scanning the inserted
// row back. Most columns come from required field arguments; nullable
// columns (disqualified, exit_initiated_at, exit_loop_completed_at,
// exit_finished_at) come from the optional Node_Create_Fields struct.
// created_at and updated_at are both set to the current clock-hook time,
// and piece_count is always initialized to zero for a new node.
func (obj *cockroachImpl) CreateNoReturn_Node(ctx context.Context,
	node_id Node_Id_Field,
	node_address Node_Address_Field,
	node_last_net Node_LastNet_Field,
	node_protocol Node_Protocol_Field,
	node_type Node_Type_Field,
	node_email Node_Email_Field,
	node_wallet Node_Wallet_Field,
	node_free_bandwidth Node_FreeBandwidth_Field,
	node_free_disk Node_FreeDisk_Field,
	node_major Node_Major_Field,
	node_minor Node_Minor_Field,
	node_patch Node_Patch_Field,
	node_hash Node_Hash_Field,
	node_timestamp Node_Timestamp_Field,
	node_release Node_Release_Field,
	node_latency_90 Node_Latency90_Field,
	node_audit_success_count Node_AuditSuccessCount_Field,
	node_total_audit_count Node_TotalAuditCount_Field,
	node_uptime_success_count Node_UptimeSuccessCount_Field,
	node_total_uptime_count Node_TotalUptimeCount_Field,
	node_last_contact_success Node_LastContactSuccess_Field,
	node_last_contact_failure Node_LastContactFailure_Field,
	node_contained Node_Contained_Field,
	node_audit_reputation_alpha Node_AuditReputationAlpha_Field,
	node_audit_reputation_beta Node_AuditReputationBeta_Field,
	node_uptime_reputation_alpha Node_UptimeReputationAlpha_Field,
	node_uptime_reputation_beta Node_UptimeReputationBeta_Field,
	node_exit_success Node_ExitSuccess_Field,
	optional Node_Create_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	// Timestamps come from the injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := node_id.value()
	__address_val := node_address.value()
	__last_net_val := node_last_net.value()
	__protocol_val := node_protocol.value()
	__type_val := node_type.value()
	__email_val := node_email.value()
	__wallet_val := node_wallet.value()
	__free_bandwidth_val := node_free_bandwidth.value()
	__free_disk_val := node_free_disk.value()
	// piece_count is not a caller-supplied field; new nodes start at 0.
	__piece_count_val := int64(0)
	__major_val := node_major.value()
	__minor_val := node_minor.value()
	__patch_val := node_patch.value()
	__hash_val := node_hash.value()
	__timestamp_val := node_timestamp.value()
	__release_val := node_release.value()
	__latency_90_val := node_latency_90.value()
	__audit_success_count_val := node_audit_success_count.value()
	__total_audit_count_val := node_total_audit_count.value()
	__uptime_success_count_val := node_uptime_success_count.value()
	__total_uptime_count_val := node_total_uptime_count.value()
	__created_at_val := __now
	__updated_at_val := __now
	__last_contact_success_val := node_last_contact_success.value()
	__last_contact_failure_val := node_last_contact_failure.value()
	__contained_val := node_contained.value()
	__disqualified_val := optional.Disqualified.value()
	__audit_reputation_alpha_val := node_audit_reputation_alpha.value()
	__audit_reputation_beta_val := node_audit_reputation_beta.value()
	__uptime_reputation_alpha_val := node_uptime_reputation_alpha.value()
	__uptime_reputation_beta_val := node_uptime_reputation_beta.value()
	__exit_initiated_at_val := optional.ExitInitiatedAt.value()
	__exit_loop_completed_at_val := optional.ExitLoopCompletedAt.value()
	__exit_finished_at_val := optional.ExitFinishedAt.value()
	__exit_success_val := node_exit_success.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, last_net, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)

	_, err = obj.driver.Exec(__stmt, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// Create_User inserts a new users row and returns the created row as scanned
// back via RETURNING. short_name and partner_id are nullable and come from
// the optional User_Create_Fields; status is always initialized to 0
// (presumably the "inactive"/initial state — confirm against the status
// enum) and created_at to the current clock-hook time.
func (obj *cockroachImpl) Create_User(ctx context.Context,
	user_id User_Id_Field,
	user_email User_Email_Field,
	user_normalized_email User_NormalizedEmail_Field,
	user_full_name User_FullName_Field,
	user_password_hash User_PasswordHash_Field,
	optional User_Create_Fields) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	// Timestamps come from the injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := user_id.value()
	__email_val := user_email.value()
	__normalized_email_val := user_normalized_email.value()
	__full_name_val := user_full_name.value()
	__short_name_val := optional.ShortName.value()
	__password_hash_val := user_password_hash.value()
	__status_val := int(0)
	__partner_id_val := optional.PartnerId.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// Create_Project inserts a new projects row and returns the created row as
// scanned back via RETURNING. partner_id is nullable and comes from the
// optional Project_Create_Fields; created_at is set to the current
// clock-hook time.
func (obj *cockroachImpl) Create_Project(ctx context.Context,
	project_id Project_Id_Field,
	project_name Project_Name_Field,
	project_description Project_Description_Field,
	project_usage_limit Project_UsageLimit_Field,
	project_owner_id Project_OwnerId_Field,
	optional Project_Create_Fields) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)

	// Timestamps come from the injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := project_id.value()
	__name_val := project_name.value()
	__description_val := project_description.value()
	__usage_limit_val := project_usage_limit.value()
	__partner_id_val := optional.PartnerId.value()
	__owner_id_val := project_owner_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __usage_limit_val, __partner_id_val, __owner_id_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// Create_ProjectInvoiceStamp inserts a new project_invoice_stamps row and
// returns the created row as scanned back via RETURNING. Unlike most create
// methods, created_at is caller-supplied rather than taken from the clock
// hook.
func (obj *cockroachImpl) Create_ProjectInvoiceStamp(ctx context.Context,
	project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
	project_invoice_stamp_invoice_id ProjectInvoiceStamp_InvoiceId_Field,
	project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field,
	project_invoice_stamp_end_date ProjectInvoiceStamp_EndDate_Field,
	project_invoice_stamp_created_at ProjectInvoiceStamp_CreatedAt_Field) (
	project_invoice_stamp *ProjectInvoiceStamp, err error) {
	defer mon.Task()(&ctx)(&err)
	__project_id_val := project_invoice_stamp_project_id.value()
	__invoice_id_val := project_invoice_stamp_invoice_id.value()
	__start_date_val := project_invoice_stamp_start_date.value()
	__end_date_val := project_invoice_stamp_end_date.value()
	__created_at_val := project_invoice_stamp_created_at.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invoice_stamps ( project_id, invoice_id, start_date, end_date, created_at ) VALUES ( ?, ?, ?, ?, ? ) RETURNING project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __project_id_val, __invoice_id_val, __start_date_val, __end_date_val, __created_at_val)

	project_invoice_stamp = &ProjectInvoiceStamp{}
	err = obj.driver.QueryRow(__stmt, __project_id_val, __invoice_id_val, __start_date_val, __end_date_val, __created_at_val).Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project_invoice_stamp, nil

}
|
|
|
|
// Create_ProjectMember inserts a new project_members row linking a member to
// a project and returns the created row as scanned back via RETURNING.
// created_at is set to the current clock-hook time.
func (obj *cockroachImpl) Create_ProjectMember(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	project_member *ProjectMember, err error) {
	defer mon.Task()(&ctx)(&err)

	// Timestamps come from the injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__member_id_val := project_member_member_id.value()
	__project_id_val := project_member_project_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? ) RETURNING project_members.member_id, project_members.project_id, project_members.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __member_id_val, __project_id_val, __created_at_val)

	project_member = &ProjectMember{}
	err = obj.driver.QueryRow(__stmt, __member_id_val, __project_id_val, __created_at_val).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project_member, nil

}
|
|
|
|
// Create_ApiKey inserts a new api_keys row and returns the created row as
// scanned back via RETURNING. partner_id is nullable and comes from the
// optional ApiKey_Create_Fields; created_at is set to the current clock-hook
// time.
func (obj *cockroachImpl) Create_ApiKey(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	api_key_project_id ApiKey_ProjectId_Field,
	api_key_head ApiKey_Head_Field,
	api_key_name ApiKey_Name_Field,
	api_key_secret ApiKey_Secret_Field,
	optional ApiKey_Create_Fields) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	// Timestamps come from the injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := api_key_id.value()
	__project_id_val := api_key_project_id.value()
	__head_val := api_key_head.value()
	__name_val := api_key_name.value()
	__secret_val := api_key_secret.value()
	__partner_id_val := optional.PartnerId.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// CreateNoReturn_SerialNumber inserts a new serial_numbers row without
// scanning the inserted row back (no RETURNING clause).
func (obj *cockroachImpl) CreateNoReturn_SerialNumber(ctx context.Context,
	serial_number_serial_number SerialNumber_SerialNumber_Field,
	serial_number_bucket_id SerialNumber_BucketId_Field,
	serial_number_expires_at SerialNumber_ExpiresAt_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__serial_number_val := serial_number_serial_number.value()
	__bucket_id_val := serial_number_bucket_id.value()
	__expires_at_val := serial_number_expires_at.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO serial_numbers ( serial_number, bucket_id, expires_at ) VALUES ( ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)

	_, err = obj.driver.Exec(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
func (obj *cockroachImpl) CreateNoReturn_UsedSerial(ctx context.Context,
|
|
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
|
|
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__serial_number_id_val := used_serial_serial_number_id.value()
|
|
__storage_node_id_val := used_serial_storage_node_id.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO used_serials ( serial_number_id, storage_node_id ) VALUES ( ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __serial_number_id_val, __storage_node_id_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __serial_number_id_val, __storage_node_id_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__bucket_name_val := bucket_storage_tally_bucket_name.value()
|
|
__project_id_val := bucket_storage_tally_project_id.value()
|
|
__interval_start_val := bucket_storage_tally_interval_start.value()
|
|
__inline_val := bucket_storage_tally_inline.value()
|
|
__remote_val := bucket_storage_tally_remote.value()
|
|
__remote_segments_count_val := bucket_storage_tally_remote_segments_count.value()
|
|
__inline_segments_count_val := bucket_storage_tally_inline_segments_count.value()
|
|
__object_count_val := bucket_storage_tally_object_count.value()
|
|
__metadata_size_val := bucket_storage_tally_metadata_size.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) CreateNoReturn_StoragenodeStorageTally(ctx context.Context,
|
|
storagenode_storage_tally_node_id StoragenodeStorageTally_NodeId_Field,
|
|
storagenode_storage_tally_interval_end_time StoragenodeStorageTally_IntervalEndTime_Field,
|
|
storagenode_storage_tally_data_total StoragenodeStorageTally_DataTotal_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__node_id_val := storagenode_storage_tally_node_id.value()
|
|
__interval_end_time_val := storagenode_storage_tally_interval_end_time.value()
|
|
__data_total_val := storagenode_storage_tally_data_total.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO storagenode_storage_tallies ( node_id, interval_end_time, data_total ) VALUES ( ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __interval_end_time_val, __data_total_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := peer_identity_node_id.value()
|
|
__leaf_serial_number_val := peer_identity_leaf_serial_number.value()
|
|
__chain_val := peer_identity_chain.value()
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__secret_val := registration_token_secret.value()
|
|
__owner_id_val := optional.OwnerId.value()
|
|
__project_limit_val := registration_token_project_limit.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__secret_val := reset_password_token_secret.value()
|
|
__owner_id_val := reset_password_token_owner_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO reset_password_tokens ( secret, owner_id, created_at ) VALUES ( ?, ?, ? ) RETURNING reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __secret_val, __owner_id_val, __created_at_val)
|
|
|
|
reset_password_token = &ResetPasswordToken{}
|
|
err = obj.driver.QueryRow(__stmt, __secret_val, __owner_id_val, __created_at_val).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return reset_password_token, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_Offer(ctx context.Context,
|
|
offer_name Offer_Name_Field,
|
|
offer_description Offer_Description_Field,
|
|
offer_award_credit_in_cents Offer_AwardCreditInCents_Field,
|
|
offer_invitee_credit_in_cents Offer_InviteeCreditInCents_Field,
|
|
offer_expires_at Offer_ExpiresAt_Field,
|
|
offer_status Offer_Status_Field,
|
|
offer_type Offer_Type_Field,
|
|
optional Offer_Create_Fields) (
|
|
offer *Offer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__name_val := offer_name.value()
|
|
__description_val := offer_description.value()
|
|
__award_credit_in_cents_val := offer_award_credit_in_cents.value()
|
|
__invitee_credit_in_cents_val := offer_invitee_credit_in_cents.value()
|
|
__award_credit_duration_days_val := optional.AwardCreditDurationDays.value()
|
|
__invitee_credit_duration_days_val := optional.InviteeCreditDurationDays.value()
|
|
__redeemable_cap_val := optional.RedeemableCap.value()
|
|
__expires_at_val := offer_expires_at.value()
|
|
__created_at_val := __now
|
|
__status_val := offer_status.value()
|
|
__type_val := offer_type.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO offers ( name, description, award_credit_in_cents, invitee_credit_in_cents, award_credit_duration_days, invitee_credit_duration_days, redeemable_cap, expires_at, created_at, status, type ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __name_val, __description_val, __award_credit_in_cents_val, __invitee_credit_in_cents_val, __award_credit_duration_days_val, __invitee_credit_duration_days_val, __redeemable_cap_val, __expires_at_val, __created_at_val, __status_val, __type_val)
|
|
|
|
offer = &Offer{}
|
|
err = obj.driver.QueryRow(__stmt, __name_val, __description_val, __award_credit_in_cents_val, __invitee_credit_in_cents_val, __award_credit_duration_days_val, __invitee_credit_duration_days_val, __redeemable_cap_val, __expires_at_val, __created_at_val, __status_val, __type_val).Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return offer, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_UserCredit(ctx context.Context,
|
|
user_credit_user_id UserCredit_UserId_Field,
|
|
user_credit_offer_id UserCredit_OfferId_Field,
|
|
user_credit_type UserCredit_Type_Field,
|
|
user_credit_credits_earned_in_cents UserCredit_CreditsEarnedInCents_Field,
|
|
user_credit_expires_at UserCredit_ExpiresAt_Field,
|
|
optional UserCredit_Create_Fields) (
|
|
user_credit *UserCredit, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__user_id_val := user_credit_user_id.value()
|
|
__offer_id_val := user_credit_offer_id.value()
|
|
__referred_by_val := optional.ReferredBy.value()
|
|
__type_val := user_credit_type.value()
|
|
__credits_earned_in_cents_val := user_credit_credits_earned_in_cents.value()
|
|
__credits_used_in_cents_val := int(0)
|
|
__expires_at_val := user_credit_expires_at.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO user_credits ( user_id, offer_id, referred_by, type, credits_earned_in_cents, credits_used_in_cents, expires_at, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING user_credits.id, user_credits.user_id, user_credits.offer_id, user_credits.referred_by, user_credits.type, user_credits.credits_earned_in_cents, user_credits.credits_used_in_cents, user_credits.expires_at, user_credits.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __user_id_val, __offer_id_val, __referred_by_val, __type_val, __credits_earned_in_cents_val, __credits_used_in_cents_val, __expires_at_val, __created_at_val)
|
|
|
|
user_credit = &UserCredit{}
|
|
err = obj.driver.QueryRow(__stmt, __user_id_val, __offer_id_val, __referred_by_val, __type_val, __credits_earned_in_cents_val, __credits_used_in_cents_val, __expires_at_val, __created_at_val).Scan(&user_credit.Id, &user_credit.UserId, &user_credit.OfferId, &user_credit.ReferredBy, &user_credit.Type, &user_credit.CreditsEarnedInCents, &user_credit.CreditsUsedInCents, &user_credit.ExpiresAt, &user_credit.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user_credit, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := bucket_metainfo_id.value()
|
|
__project_id_val := bucket_metainfo_project_id.value()
|
|
__name_val := bucket_metainfo_name.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__path_cipher_val := bucket_metainfo_path_cipher.value()
|
|
__created_at_val := __now
|
|
__default_segment_size_val := bucket_metainfo_default_segment_size.value()
|
|
__default_encryption_cipher_suite_val := bucket_metainfo_default_encryption_cipher_suite.value()
|
|
__default_encryption_block_size_val := bucket_metainfo_default_encryption_block_size.value()
|
|
__default_redundancy_algorithm_val := bucket_metainfo_default_redundancy_algorithm.value()
|
|
__default_redundancy_share_size_val := bucket_metainfo_default_redundancy_share_size.value()
|
|
__default_redundancy_required_shares_val := bucket_metainfo_default_redundancy_required_shares.value()
|
|
__default_redundancy_repair_shares_val := bucket_metainfo_default_redundancy_repair_shares.value()
|
|
__default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value()
|
|
__default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val)
|
|
|
|
bucket_metainfo = &BucketMetainfo{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bucket_metainfo, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) CreateNoReturn_GracefulExitProgress(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
|
|
graceful_exit_progress_bytes_transferred GracefulExitProgress_BytesTransferred_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := graceful_exit_progress_node_id.value()
|
|
__bytes_transferred_val := graceful_exit_progress_bytes_transferred.value()
|
|
__pieces_transferred_val := int64(0)
|
|
__pieces_failed_val := int64(0)
|
|
__updated_at_val := __now.UTC()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO graceful_exit_progress ( node_id, bytes_transferred, pieces_transferred, pieces_failed, updated_at ) VALUES ( ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __bytes_transferred_val, __pieces_transferred_val, __pieces_failed_val, __updated_at_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __bytes_transferred_val, __pieces_transferred_val, __pieces_failed_val, __updated_at_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) CreateNoReturn_GracefulExitTransferQueue(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
graceful_exit_transfer_queue_durability_ratio GracefulExitTransferQueue_DurabilityRatio_Field,
|
|
graceful_exit_transfer_queue_order_limit_send_count GracefulExitTransferQueue_OrderLimitSendCount_Field,
|
|
optional GracefulExitTransferQueue_Create_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := graceful_exit_transfer_queue_node_id.value()
|
|
__path_val := graceful_exit_transfer_queue_path.value()
|
|
__piece_num_val := graceful_exit_transfer_queue_piece_num.value()
|
|
__root_piece_id_val := optional.RootPieceId.value()
|
|
__durability_ratio_val := graceful_exit_transfer_queue_durability_ratio.value()
|
|
__queued_at_val := __now.UTC()
|
|
__requested_at_val := optional.RequestedAt.value()
|
|
__last_failed_at_val := optional.LastFailedAt.value()
|
|
__last_failed_code_val := optional.LastFailedCode.value()
|
|
__failed_count_val := optional.FailedCount.value()
|
|
__finished_at_val := optional.FinishedAt.value()
|
|
__order_limit_send_count_val := graceful_exit_transfer_queue_order_limit_send_count.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO graceful_exit_transfer_queue ( node_id, path, piece_num, root_piece_id, durability_ratio, queued_at, requested_at, last_failed_at, last_failed_code, failed_count, finished_at, order_limit_send_count ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __path_val, __piece_num_val, __root_piece_id_val, __durability_ratio_val, __queued_at_val, __requested_at_val, __last_failed_at_val, __last_failed_code_val, __failed_count_val, __finished_at_val, __order_limit_send_count_val)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __node_id_val, __path_val, __piece_num_val, __root_piece_id_val, __durability_ratio_val, __queued_at_val, __requested_at_val, __last_failed_at_val, __last_failed_code_val, __failed_count_val, __finished_at_val, __order_limit_send_count_val)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_NodesOfflineTime(ctx context.Context,
|
|
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
|
|
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
|
|
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
|
|
nodes_offline_time *NodesOfflineTime, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__node_id_val := nodes_offline_time_node_id.value()
|
|
__tracked_at_val := nodes_offline_time_tracked_at.value()
|
|
__seconds_val := nodes_offline_time_seconds.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes_offline_times ( node_id, tracked_at, seconds ) VALUES ( ?, ?, ? ) RETURNING nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __tracked_at_val, __seconds_val)
|
|
|
|
nodes_offline_time = &NodesOfflineTime{}
|
|
err = obj.driver.QueryRow(__stmt, __node_id_val, __tracked_at_val, __seconds_val).Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return nodes_offline_time, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__user_id_val := stripe_customer_user_id.value()
|
|
__customer_id_val := stripe_customer_customer_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripe_customers ( user_id, customer_id, created_at ) VALUES ( ?, ?, ? ) RETURNING stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __user_id_val, __customer_id_val, __created_at_val)
|
|
|
|
stripe_customer = &StripeCustomer{}
|
|
err = obj.driver.QueryRow(__stmt, __user_id_val, __customer_id_val, __created_at_val).Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripe_customer, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := coinpayments_transaction_id.value()
|
|
__user_id_val := coinpayments_transaction_user_id.value()
|
|
__address_val := coinpayments_transaction_address.value()
|
|
__amount_val := coinpayments_transaction_amount.value()
|
|
__received_val := coinpayments_transaction_received.value()
|
|
__status_val := coinpayments_transaction_status.value()
|
|
__key_val := coinpayments_transaction_key.value()
|
|
__timeout_val := coinpayments_transaction_timeout.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO coinpayments_transactions ( id, user_id, address, amount, received, status, key, timeout, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __timeout_val, __created_at_val)
|
|
|
|
coinpayments_transaction = &CoinpaymentsTransaction{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __timeout_val, __created_at_val).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coinpayments_transaction, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_StripecoinpaymentsApplyBalanceIntent(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
|
|
stripecoinpayments_apply_balance_intent_state StripecoinpaymentsApplyBalanceIntent_State_Field) (
|
|
stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__tx_id_val := stripecoinpayments_apply_balance_intent_tx_id.value()
|
|
__state_val := stripecoinpayments_apply_balance_intent_state.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_apply_balance_intents ( tx_id, state, created_at ) VALUES ( ?, ?, ? ) RETURNING stripecoinpayments_apply_balance_intents.tx_id, stripecoinpayments_apply_balance_intents.state, stripecoinpayments_apply_balance_intents.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __tx_id_val, __state_val, __created_at_val)
|
|
|
|
stripecoinpayments_apply_balance_intent = &StripecoinpaymentsApplyBalanceIntent{}
|
|
err = obj.driver.QueryRow(__stmt, __tx_id_val, __state_val, __created_at_val).Scan(&stripecoinpayments_apply_balance_intent.TxId, &stripecoinpayments_apply_balance_intent.State, &stripecoinpayments_apply_balance_intent.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_apply_balance_intent, nil
|
|
|
|
}
|
|
|
|
func (obj *cockroachImpl) Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := stripecoinpayments_invoice_project_record_id.value()
|
|
__project_id_val := stripecoinpayments_invoice_project_record_project_id.value()
|
|
__storage_val := stripecoinpayments_invoice_project_record_storage.value()
|
|
__egress_val := stripecoinpayments_invoice_project_record_egress.value()
|
|
__objects_val := stripecoinpayments_invoice_project_record_objects.value()
|
|
__period_start_val := stripecoinpayments_invoice_project_record_period_start.value()
|
|
__period_end_val := stripecoinpayments_invoice_project_record_period_end.value()
|
|
__state_val := stripecoinpayments_invoice_project_record_state.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_invoice_project_records ( id, project_id, storage, egress, objects, period_start, period_end, state, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __project_id_val, __storage_val, __egress_val, __objects_val, __period_start_val, __period_end_val, __state_val, __created_at_val)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __storage_val, __egress_val, __objects_val, __period_start_val, __period_end_val, __state_val, __created_at_val).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
|
|
}
|
|
|
|
// Create_StripecoinpaymentsTxConversionRate inserts one row into
// stripecoinpayments_tx_conversion_rates and returns the stored row as
// scanned back from the INSERT ... RETURNING clause. created_at is stamped
// from the db clock hook (UTC). ctx is consumed only by mon.Task; the query
// itself is issued without context (lib/pq context cancellation is known to
// be problematic — see the revert note at the top of this file).
func (obj *cockroachImpl) Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
	stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
	stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
	stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
	defer mon.Task()(&ctx)(&err)

	// Resolve placeholder values up front; created_at comes from the
	// injectable clock hook so tests can control time.
	__now := obj.db.Hooks.Now().UTC()
	__tx_id_val := stripecoinpayments_tx_conversion_rate_tx_id.value()
	__rate_val := stripecoinpayments_tx_conversion_rate_rate.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_tx_conversion_rates ( tx_id, rate, created_at ) VALUES ( ?, ?, ? ) RETURNING stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at")

	// Render rewrites the generic '?' placeholders for the target dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __tx_id_val, __rate_val, __created_at_val)

	stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
	err = obj.driver.QueryRow(__stmt, __tx_id_val, __rate_val, __created_at_val).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
	if err != nil {
		// makeErr classifies driver errors (no-rows, constraint, ...).
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_tx_conversion_rate, nil

}
|
|
|
|
// Create_Coupon inserts one row into coupons and returns the stored row as
// scanned back from INSERT ... RETURNING. created_at is stamped from the db
// clock hook (UTC). ctx is consumed only by mon.Task; the query is issued
// without context (lib/pq context-cancellation caveat, see file header).
func (obj *cockroachImpl) Create_Coupon(ctx context.Context,
	coupon_id Coupon_Id_Field,
	coupon_project_id Coupon_ProjectId_Field,
	coupon_user_id Coupon_UserId_Field,
	coupon_amount Coupon_Amount_Field,
	coupon_description Coupon_Description_Field,
	coupon_type Coupon_Type_Field,
	coupon_status Coupon_Status_Field,
	coupon_duration Coupon_Duration_Field) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	// Unwrap the typed field arguments into plain driver values.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := coupon_id.value()
	__project_id_val := coupon_project_id.value()
	__user_id_val := coupon_user_id.value()
	__amount_val := coupon_amount.value()
	__description_val := coupon_description.value()
	__type_val := coupon_type.value()
	__status_val := coupon_status.value()
	__duration_val := coupon_duration.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupons ( id, project_id, user_id, amount, description, type, status, duration, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at")

	// Rewrite placeholders for the active SQL dialect, then log.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __project_id_val, __user_id_val, __amount_val, __description_val, __type_val, __status_val, __duration_val, __created_at_val)

	coupon = &Coupon{}
	err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __user_id_val, __amount_val, __description_val, __type_val, __status_val, __duration_val, __created_at_val).Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon, nil

}
|
|
|
|
// Create_CouponUsage inserts one row into coupon_usages and returns the
// stored row via INSERT ... RETURNING. Unlike the other Create_* methods in
// this file, the table has no created_at column, so no clock hook is read.
// ctx feeds only mon.Task; the query is issued without context.
func (obj *cockroachImpl) Create_CouponUsage(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_amount CouponUsage_Amount_Field,
	coupon_usage_status CouponUsage_Status_Field,
	coupon_usage_period CouponUsage_Period_Field) (
	coupon_usage *CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	// Unwrap the typed field arguments into plain driver values.
	__coupon_id_val := coupon_usage_coupon_id.value()
	__amount_val := coupon_usage_amount.value()
	__status_val := coupon_usage_status.value()
	__period_val := coupon_usage_period.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupon_usages ( coupon_id, amount, status, period ) VALUES ( ?, ?, ?, ? ) RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __coupon_id_val, __amount_val, __status_val, __period_val)

	coupon_usage = &CouponUsage{}
	err = obj.driver.QueryRow(__stmt, __coupon_id_val, __amount_val, __status_val, __period_val).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_usage, nil

}
|
|
|
|
// Get_ValueAttribution_By_ProjectId_And_BucketName fetches the single
// value_attributions row keyed by (project_id, bucket_name). A missing row
// surfaces as a wrapped sql.ErrNoRows (ErrorCode_NoRows) from makeErr; a
// typed nil pointer is returned alongside any error.
func (obj *cockroachImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
	value_attribution *ValueAttribution, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")

	var __values []interface{}
	__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	value_attribution = &ValueAttribution{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
	if err != nil {
		return (*ValueAttribution)(nil), obj.makeErr(err)
	}
	return value_attribution, nil

}
|
|
|
|
// Get_PendingAudits_By_NodeId fetches the single pending_audits row for the
// given node id. A missing row surfaces as a wrapped sql.ErrNoRows via
// makeErr. ctx feeds only mon.Task; the query runs without context.
func (obj *cockroachImpl) Get_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	pending_audits *PendingAudits, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE pending_audits.node_id = ?")

	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	pending_audits = &PendingAudits{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
	if err != nil {
		return (*PendingAudits)(nil), obj.makeErr(err)
	}
	return pending_audits, nil

}
|
|
|
|
// Get_Irreparabledb_By_Segmentpath fetches the single irreparabledbs row
// keyed by segmentpath. A missing row surfaces as a wrapped sql.ErrNoRows
// via makeErr.
func (obj *cockroachImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return (*Irreparabledb)(nil), obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath
// returns a LIMIT/OFFSET page of irreparabledbs rows with
// segmentpath strictly greater than the given key, ordered ascending by
// segmentpath (keyset-style pagination). An empty page yields a nil slice
// and nil error.
func (obj *cockroachImpl) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
	limit int, offset int64) (
	rows []*Irreparabledb, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath > ? ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath_greater.value())

	// LIMIT/OFFSET are bound as trailing placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		irreparabledb := &Irreparabledb{}
		err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, irreparabledb)
	}
	// Surface any iteration error that terminated the Next loop.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_AccountingTimestamps_Value_By_Name looks up the value column of the
// accounting_timestamps row with the given name. Unlike the Get_* methods,
// a missing row is NOT an error: sql.ErrNoRows is swallowed and (nil, nil)
// is returned, so callers must nil-check the row.
func (obj *cockroachImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field) (
	row *Value_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")

	var __values []interface{}
	__values = append(__values, accounting_timestamps_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Value_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value)
	// "Find" semantics: absence is represented as a nil row, not an error.
	if err == sql.ErrNoRows {
		return (*Value_Row)(nil), nil
	}
	if err != nil {
		return (*Value_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_AccountingRollup_By_Id fetches the single accounting_rollups row with
// the given id. A missing row surfaces as a wrapped sql.ErrNoRows via
// makeErr.
func (obj *cockroachImpl) Get_AccountingRollup_By_Id(ctx context.Context,
	accounting_rollup_id AccountingRollup_Id_Field) (
	accounting_rollup *AccountingRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?")

	var __values []interface{}
	__values = append(__values, accounting_rollup_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	accounting_rollup = &AccountingRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
	if err != nil {
		return (*AccountingRollup)(nil), obj.makeErr(err)
	}
	return accounting_rollup, nil

}
|
|
|
|
// All_AccountingRollup_By_StartTime_GreaterOrEqual returns every
// accounting_rollups row whose start_time is >= the given bound, in the
// database's default order (no ORDER BY). Zero matches yield a nil slice
// and nil error.
func (obj *cockroachImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
	accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
	rows []*AccountingRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")

	var __values []interface{}
	__values = append(__values, accounting_rollup_start_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		accounting_rollup := &AccountingRollup{}
		err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, accounting_rollup)
	}
	// Surface any iteration error that terminated the Next loop.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Node_By_Id fetches the single nodes row with the given id, selecting
// all 35 node columns. A missing row surfaces as a wrapped sql.ErrNoRows
// via makeErr. ctx feeds only mon.Task; the query runs without context.
func (obj *cockroachImpl) Get_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	// Scan targets must stay in exactly the same order as the SELECT list.
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
	if err != nil {
		return (*Node)(nil), obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// All_Node_Id returns the id column of every nodes row, in the database's
// default order. Zero rows yield a nil slice and nil error.
func (obj *cockroachImpl) All_Node_Id(ctx context.Context) (
	rows []*Id_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes")

	var __values []interface{}
	// No filter parameters; this append is a generator-emitted no-op.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Row{}
		err = __rows.Scan(&row.Id)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	// Surface any iteration error that terminated the Next loop.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id returns a LIMIT/OFFSET
// page of full node rows with id >= the given key, ordered ascending by id
// (keyset-style pagination over all 35 node columns). An empty page yields
// a nil slice and nil error.
func (obj *cockroachImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context.Context,
	node_id_greater_or_equal Node_Id_Field,
	limit int, offset int64) (
	rows []*Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, node_id_greater_or_equal.value())

	// LIMIT/OFFSET are bound as trailing placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		node := &Node{}
		// Scan targets must mirror the SELECT column order exactly.
		err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, node)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id
// returns a LIMIT/OFFSET page of (id, last_net, address, protocol)
// projections over non-disqualified nodes (disqualified IS NULL) with
// id >= the given key, ordered ascending by id. An empty page yields a nil
// slice and nil error.
func (obj *cockroachImpl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
	node_id_greater_or_equal Node_Id_Field,
	limit int, offset int64) (
	rows []*Id_LastNet_Address_Protocol_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, node_id_greater_or_equal.value())

	// LIMIT/OFFSET are bound as trailing placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_LastNet_Address_Protocol_Row{}
		err = __rows.Scan(&row.Id, &row.LastNet, &row.Address, &row.Protocol)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number returns (id,
// piece_count) for every node whose piece_count differs from 0 (the
// "Number" in the name is the literal 0 baked into the WHERE clause).
// Zero rows yield a nil slice and nil error.
func (obj *cockroachImpl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
	rows []*Id_PieceCount_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.piece_count FROM nodes WHERE nodes.piece_count != 0")

	var __values []interface{}
	// No filter parameters; this append is a generator-emitted no-op.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_PieceCount_Row{}
		err = __rows.Scan(&row.Id, &row.PieceCount)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure
// returns a LIMIT/OFFSET page of (id, address, last_contact_success,
// last_contact_failure) for non-disqualified nodes whose most recent
// contact attempt failed (last_contact_success < last_contact_failure),
// ordered ascending by last_contact_failure. The only bound parameters are
// limit and offset; the column comparison is intra-row.
func (obj *cockroachImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
	limit int, offset int64) (
	rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_failure LIMIT ? OFFSET ?")

	var __values []interface{}
	// No filter parameters; this append is a generator-emitted no-op.
	__values = append(__values)

	// LIMIT/OFFSET are bound as trailing placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Address_LastContactSuccess_LastContactFailure_Row{}
		err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess
// returns (id, address, last_contact_success, last_contact_failure) for
// non-disqualified nodes whose last successful contact is older than the
// given bound but still newer than their last failure (i.e. currently
// reachable but stale), ordered ascending by last_contact_success.
func (obj *cockroachImpl) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
	node_last_contact_success_less Node_LastContactSuccess_Field) (
	rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < ? AND nodes.last_contact_success > nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_success")

	var __values []interface{}
	__values = append(__values, node_last_contact_success_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Address_LastContactSuccess_LastContactFailure_Row{}
		err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_User_By_NormalizedEmail_And_Status_Not_Number fetches the single
// active user (status != 0) with the given normalized email. Because the
// filter is not backed by a unique constraint, the query uses LIMIT 2 and
// the method scans manually: zero rows map to a wrapped sql.ErrNoRows, a
// second row maps to a tooManyRows error, otherwise the one row is
// returned.
func (obj *cockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
	user_normalized_email User_NormalizedEmail_Field) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	// LIMIT 2 fetches just enough rows to distinguish "exactly one"
	// from "more than one" without scanning the whole match set.
	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")

	var __values []interface{}
	__values = append(__values, user_normalized_email.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	// No first row: distinguish iteration failure from genuine absence.
	if !__rows.Next() {
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, makeErr(sql.ErrNoRows)
	}

	user = &User{}
	err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the filter was ambiguous — refuse to guess.
	if __rows.Next() {
		return nil, tooManyRows("User_By_NormalizedEmail_And_Status_Not_Number")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return user, nil

}
|
|
|
|
// Get_User_By_Id fetches the single users row with the given id. A missing
// row surfaces as a wrapped sql.ErrNoRows via makeErr.
func (obj *cockroachImpl) Get_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
	if err != nil {
		return (*User)(nil), obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// Get_Project_By_Id fetches the single projects row with the given id. A
// missing row surfaces as a wrapped sql.ErrNoRows via makeErr.
func (obj *cockroachImpl) Get_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	if err != nil {
		return (*Project)(nil), obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// Get_Project_UsageLimit_By_Id fetches only the usage_limit column of the
// projects row with the given id, wrapped in a single-field row struct. A
// missing row surfaces as a wrapped sql.ErrNoRows via makeErr.
func (obj *cockroachImpl) Get_Project_UsageLimit_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	row *UsageLimit_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.usage_limit FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &UsageLimit_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.UsageLimit)
	if err != nil {
		return (*UsageLimit_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// All_Project returns every projects row, in the database's default order
// (no ORDER BY). Zero rows yield a nil slice and nil error.
func (obj *cockroachImpl) All_Project(ctx context.Context) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects")

	var __values []interface{}
	// No filter parameters; this append is a generator-emitted no-op.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	// Surface any iteration error that terminated the Next loop.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt returns every
// projects row created strictly before the given timestamp, ordered
// ascending by created_at. Zero matches yield a nil slice and nil error.
func (obj *cockroachImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_created_at_less Project_CreatedAt_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")

	var __values []interface{}
	__values = append(__values, project_created_at_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Project_By_OwnerId_OrderBy_Asc_CreatedAt returns every projects row
// owned by the given owner id, ordered ascending by created_at. Zero
// matches yield a nil slice and nil error.
func (obj *cockroachImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_owner_id Project_OwnerId_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")

	var __values []interface{}
	__values = append(__values, project_owner_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name returns all
// projects the given member belongs to (via the project_members join table),
// ordered by project name ascending. Generated by dbx.
func (obj *cockroachImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt returns a paginated
// slice of projects created strictly before the given timestamp, ordered by
// created_at ascending, restricted by limit/offset. Generated by dbx.
func (obj *cockroachImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_created_at_less Project_CreatedAt_Field,
	limit int, offset int64) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, project_created_at_less.value())

	// limit/offset bind to the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate fetches exactly one
// invoice stamp for the (project_id, start_date) pair. It errors with a
// wrapped sql.ErrNoRows when no row matches and with tooManyRows when more
// than one does; the LIMIT 2 exists solely to detect that second row cheaply.
// Generated by dbx.
func (obj *cockroachImpl) Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx context.Context,
	project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
	project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field) (
	project_invoice_stamp *ProjectInvoiceStamp, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE project_invoice_stamps.project_id = ? AND project_invoice_stamps.start_date = ? LIMIT 2")

	var __values []interface{}
	__values = append(__values, project_invoice_stamp_project_id.value(), project_invoice_stamp_start_date.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" from an iteration failure.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, makeErr(sql.ErrNoRows)
	}

	project_invoice_stamp = &ProjectInvoiceStamp{}
	err = __rows.Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the lookup was not unique — treat as an error.
	if __rows.Next() {
		return nil, tooManyRows("ProjectInvoiceStamp_By_ProjectId_And_StartDate")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return project_invoice_stamp, nil

}
|
|
|
|
// All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate returns all
// invoice stamps for a project, newest start_date first. Generated by dbx.
func (obj *cockroachImpl) All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate(ctx context.Context,
	project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field) (
	rows []*ProjectInvoiceStamp, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_invoice_stamps.project_id, project_invoice_stamps.invoice_id, project_invoice_stamps.start_date, project_invoice_stamps.end_date, project_invoice_stamps.created_at FROM project_invoice_stamps WHERE project_invoice_stamps.project_id = ? ORDER BY project_invoice_stamps.start_date DESC")

	var __values []interface{}
	__values = append(__values, project_invoice_stamp_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_invoice_stamp := &ProjectInvoiceStamp{}
		err = __rows.Scan(&project_invoice_stamp.ProjectId, &project_invoice_stamp.InvoiceId, &project_invoice_stamp.StartDate, &project_invoice_stamp.EndDate, &project_invoice_stamp.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_invoice_stamp)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_ProjectMember_By_MemberId returns every project-membership row for the
// given member id, in unspecified order (no ORDER BY). Generated by dbx.
func (obj *cockroachImpl) All_ProjectMember_By_MemberId(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field) (
	rows []*ProjectMember, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_member := &ProjectMember{}
		err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_member)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_ProjectMember_By_ProjectId returns a limit/offset page of membership
// rows for a project. No ORDER BY is applied, so pagination order is whatever
// the database yields — callers relying on stable pages should be aware.
// Generated by dbx.
func (obj *cockroachImpl) Limited_ProjectMember_By_ProjectId(ctx context.Context,
	project_member_project_id ProjectMember_ProjectId_Field,
	limit int, offset int64) (
	rows []*ProjectMember, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.project_id = ? LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, project_member_project_id.value())

	// limit/offset bind to the trailing LIMIT ? OFFSET ? placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_member := &ProjectMember{}
		err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_member)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_ApiKey_By_Id fetches a single API key by primary key. A missing row
// surfaces as a wrapped sql.ErrNoRows from QueryRow().Scan. Generated by dbx.
func (obj *cockroachImpl) Get_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")

	var __values []interface{}
	__values = append(__values, api_key_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		// Explicit typed nil keeps the generated code uniform across return types.
		return (*ApiKey)(nil), obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// Get_ApiKey_By_Head fetches a single API key by its head value. A missing row
// surfaces as a wrapped sql.ErrNoRows from QueryRow().Scan. Generated by dbx.
func (obj *cockroachImpl) Get_ApiKey_By_Head(ctx context.Context,
	api_key_head ApiKey_Head_Field) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?")

	var __values []interface{}
	__values = append(__values, api_key_head.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return (*ApiKey)(nil), obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// Get_ApiKey_By_Name_And_ProjectId fetches a single API key by its
// (name, project_id) pair. A missing row surfaces as a wrapped sql.ErrNoRows
// from QueryRow().Scan. Generated by dbx.
func (obj *cockroachImpl) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
	api_key_name ApiKey_Name_Field,
	api_key_project_id ApiKey_ProjectId_Field) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.name = ? AND api_keys.project_id = ?")

	var __values []interface{}
	__values = append(__values, api_key_name.value(), api_key_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return (*ApiKey)(nil), obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// All_ApiKey_By_ProjectId_OrderBy_Asc_Name returns all API keys of a project,
// ordered by name ascending. Generated by dbx.
func (obj *cockroachImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	api_key_project_id ApiKey_ProjectId_Field) (
	rows []*ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name")

	var __values []interface{}
	__values = append(__values, api_key_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		api_key := &ApiKey{}
		err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, api_key)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_SerialNumber_By_SerialNumber looks up a serial number row. Unlike the
// Get_* variants, a Find returns (nil, nil) when no row matches; it still
// errors with tooManyRows if more than one matches (hence LIMIT 2).
// Generated by dbx.
func (obj *cockroachImpl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
	serial_number_serial_number SerialNumber_SerialNumber_Field) (
	serial_number *SerialNumber, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE serial_numbers.serial_number = ? LIMIT 2")

	var __values []interface{}
	__values = append(__values, serial_number_serial_number.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" (nil, nil) from an iteration failure.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, nil
	}

	serial_number = &SerialNumber{}
	err = __rows.Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the lookup was not unique — treat as an error.
	if __rows.Next() {
		return nil, tooManyRows("SerialNumber_By_SerialNumber")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return serial_number, nil

}
|
|
|
|
// Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action
// looks up one bandwidth rollup row by its full key. Returns (nil, nil) when
// no row matches (Find semantics); other errors are wrapped. Generated by dbx.
func (obj *cockroachImpl) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
	bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
	bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
	bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
	bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
	bucket_bandwidth_rollup *BucketBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.bucket_name = ? AND bucket_bandwidth_rollups.project_id = ? AND bucket_bandwidth_rollups.interval_start = ? AND bucket_bandwidth_rollups.action = ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_bucket_name.value(), bucket_bandwidth_rollup_project_id.value(), bucket_bandwidth_rollup_interval_start.value(), bucket_bandwidth_rollup_action.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_bandwidth_rollup = &BucketBandwidthRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled)
	// "not found" is not an error for Find: map ErrNoRows to (nil, nil).
	if err == sql.ErrNoRows {
		return (*BucketBandwidthRollup)(nil), nil
	}
	if err != nil {
		return (*BucketBandwidthRollup)(nil), obj.makeErr(err)
	}
	return bucket_bandwidth_rollup, nil

}
|
|
|
|
// First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart returns the
// most recent storage tally for a project (LIMIT 1 on interval_start DESC), or
// (nil, nil) when the project has no tallies. Generated by dbx.
func (obj *cockroachImpl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
	bucket_storage_tally *BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? ORDER BY bucket_storage_tallies.interval_start DESC LIMIT 1 OFFSET 0")

	var __values []interface{}
	__values = append(__values, bucket_storage_tally_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	if !__rows.Next() {
		// Distinguish "no rows" (nil, nil) from an iteration failure.
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, nil
	}

	bucket_storage_tally = &BucketStorageTally{}
	err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	return bucket_storage_tally, nil

}
|
|
|
|
// All_BucketStorageTally returns every bucket storage tally row, unfiltered
// and in unspecified order. Generated by dbx.
func (obj *cockroachImpl) All_BucketStorageTally(ctx context.Context) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies")

	var __values []interface{}
	// No placeholders for this query; the generator emits a no-op append.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_storage_tally := &BucketStorageTally{}
		err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_storage_tally)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
// returns the storage tallies of one bucket within an inclusive
// [start, end] interval window, newest interval_start first. Generated by dbx.
func (obj *cockroachImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
	bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
	bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
	rows []*BucketStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")

	var __values []interface{}
	__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_storage_tally := &BucketStorageTally{}
		err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_storage_tally)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action
// looks up one storage-node bandwidth rollup by its full key. Returns
// (nil, nil) when no row matches (Find semantics). Generated by dbx.
func (obj *cockroachImpl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
	storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field) (
	storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start = ? AND storagenode_bandwidth_rollups.action = ?")

	var __values []interface{}
	__values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start.value(), storagenode_bandwidth_rollup_action.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	storagenode_bandwidth_rollup = &StoragenodeBandwidthRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
	// "not found" is not an error for Find: map ErrNoRows to (nil, nil).
	if err == sql.ErrNoRows {
		return (*StoragenodeBandwidthRollup)(nil), nil
	}
	if err != nil {
		return (*StoragenodeBandwidthRollup)(nil), obj.makeErr(err)
	}
	return storagenode_bandwidth_rollup, nil

}
|
|
|
|
// All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual returns all
// storage-node bandwidth rollups whose interval_start is at or after the given
// time, in unspecified order. Generated by dbx.
func (obj *cockroachImpl) All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field) (
	rows []*StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ?")

	var __values []interface{}
	__values = append(__values, storagenode_bandwidth_rollup_interval_start_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
		err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, storagenode_bandwidth_rollup)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_StoragenodeStorageTally_By_Id fetches a single storage-node tally by
// primary key. A missing row surfaces as a wrapped sql.ErrNoRows from
// QueryRow().Scan. Generated by dbx.
func (obj *cockroachImpl) Get_StoragenodeStorageTally_By_Id(ctx context.Context,
	storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
	storagenode_storage_tally *StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.id = ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	storagenode_storage_tally = &StoragenodeStorageTally{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
	if err != nil {
		return (*StoragenodeStorageTally)(nil), obj.makeErr(err)
	}
	return storagenode_storage_tally, nil

}
|
|
|
|
// All_StoragenodeStorageTally returns every storage-node tally row, unfiltered
// and in unspecified order. Generated by dbx.
func (obj *cockroachImpl) All_StoragenodeStorageTally(ctx context.Context) (
	rows []*StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies")

	var __values []interface{}
	// No placeholders for this query; the generator emits a no-op append.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		storagenode_storage_tally := &StoragenodeStorageTally{}
		err = __rows.Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, storagenode_storage_tally)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual returns all
// storage-node tallies whose interval_end_time is at or after the given time,
// in unspecified order. Generated by dbx.
func (obj *cockroachImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
	storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
	rows []*StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.id, storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.interval_end_time >= ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_interval_end_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		storagenode_storage_tally := &StoragenodeStorageTally{}
		err = __rows.Scan(&storagenode_storage_tally.Id, &storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, storagenode_storage_tally)
	}
	// Check for an error that ended iteration prematurely.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_PeerIdentity_By_NodeId fetches a single peer identity by node id.
// A missing row surfaces as a wrapped sql.ErrNoRows from QueryRow().Scan.
// Generated by dbx.
func (obj *cockroachImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	peer_identity *PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return (*PeerIdentity)(nil), obj.makeErr(err)
	}
	return peer_identity, nil

}
|
|
|
|
// Get_PeerIdentity_LeafSerialNumber_By_NodeId fetches only the
// leaf_serial_number column of a peer identity, wrapped in a projection row
// struct. A missing row surfaces as a wrapped sql.ErrNoRows. Generated by dbx.
func (obj *cockroachImpl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	row *LeafSerialNumber_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &LeafSerialNumber_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.LeafSerialNumber)
	if err != nil {
		return (*LeafSerialNumber_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_RegistrationToken_By_Secret fetches a single registration token by its
// secret. A missing row surfaces as a wrapped sql.ErrNoRows from
// QueryRow().Scan. Generated by dbx.
// (The Scan arguments below restore "&registration_token.…" — the scraped
// source showed them HTML-entity-mangled as "®istration_token.…".)
func (obj *cockroachImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, registration_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return (*RegistrationToken)(nil), obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Get_RegistrationToken_By_OwnerId fetches the registration_tokens row for an
// owner id. owner_id is nullable, so the WHERE clause is built from a
// __sqlbundle_Condition that renders as "owner_id IS NULL" when the field is
// null and "owner_id = ?" (with a bound value) otherwise.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_RegistrationToken_By_OwnerId(ctx context.Context,
	registration_token_owner_id RegistrationToken_OwnerId_Field) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)

	// Condition starts in Null mode; flipped below if a value is present.
	var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}}

	var __values []interface{}
	// Generator artifact: appending nothing is a deliberate no-op placeholder.
	__values = append(__values)

	// Non-null owner id: switch the condition to "= ?" and bind the value.
	if !registration_token_owner_id.isnull() {
		__cond_0.Null = false
		__values = append(__values, registration_token_owner_id.value())
	}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return (*RegistrationToken)(nil), obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Get_ResetPasswordToken_By_Secret fetches the reset_password_tokens row
// keyed by its secret; no-match becomes a wrapped sql.ErrNoRows.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_ResetPasswordToken_By_Secret(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return (*ResetPasswordToken)(nil), obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
// Get_ResetPasswordToken_By_OwnerId fetches the reset_password_tokens row for
// the given owner_id. Unlike the RegistrationToken variant, owner_id here is
// matched with a plain "= ?" (no NULL handling in the generated SQL).
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
	reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.owner_id = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_owner_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return (*ResetPasswordToken)(nil), obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
// Get_Offer_By_Id fetches the full offers row (all twelve columns) by primary
// id; no-match becomes a wrapped sql.ErrNoRows via obj.makeErr.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_Offer_By_Id(ctx context.Context,
	offer_id Offer_Id_Field) (
	offer *Offer, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers WHERE offers.id = ?")

	var __values []interface{}
	__values = append(__values, offer_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	offer = &Offer{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type)
	if err != nil {
		return (*Offer)(nil), obj.makeErr(err)
	}
	return offer, nil

}
|
|
|
|
// All_Offer_OrderBy_Asc_Id returns every offers row ordered by id ascending.
// An empty table yields a nil slice and nil error.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) All_Offer_OrderBy_Asc_Id(ctx context.Context) (
	rows []*Offer, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT offers.id, offers.name, offers.description, offers.award_credit_in_cents, offers.invitee_credit_in_cents, offers.award_credit_duration_days, offers.invitee_credit_duration_days, offers.redeemable_cap, offers.expires_at, offers.created_at, offers.status, offers.type FROM offers ORDER BY offers.id")

	var __values []interface{}
	// Generator artifact: appending nothing is a deliberate no-op placeholder.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		offer := &Offer{}
		err = __rows.Scan(&offer.Id, &offer.Name, &offer.Description, &offer.AwardCreditInCents, &offer.InviteeCreditInCents, &offer.AwardCreditDurationDays, &offer.InviteeCreditDurationDays, &offer.RedeemableCap, &offer.ExpiresAt, &offer.CreatedAt, &offer.Status, &offer.Type)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, offer)
	}
	// Surface any iteration error that ended the Next loop.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt
// lists a user's credits that expire after the given time and are not yet
// fully used (credits_used_in_cents < credits_earned_in_cents — a
// column-to-column comparison, no bound parameter), ordered by soonest expiry.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx context.Context,
	user_credit_user_id UserCredit_UserId_Field,
	user_credit_expires_at_greater UserCredit_ExpiresAt_Field) (
	rows []*UserCredit, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT user_credits.id, user_credits.user_id, user_credits.offer_id, user_credits.referred_by, user_credits.type, user_credits.credits_earned_in_cents, user_credits.credits_used_in_cents, user_credits.expires_at, user_credits.created_at FROM user_credits WHERE user_credits.user_id = ? AND user_credits.expires_at > ? AND user_credits.credits_used_in_cents < user_credits.credits_earned_in_cents ORDER BY user_credits.expires_at")

	var __values []interface{}
	__values = append(__values, user_credit_user_id.value(), user_credit_expires_at_greater.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		user_credit := &UserCredit{}
		err = __rows.Scan(&user_credit.Id, &user_credit.UserId, &user_credit.OfferId, &user_credit.ReferredBy, &user_credit.Type, &user_credit.CreditsEarnedInCents, &user_credit.CreditsUsedInCents, &user_credit.ExpiresAt, &user_credit.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, user_credit)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Count_UserCredit_By_ReferredBy returns COUNT(*) of user_credits referred by
// the given id. referred_by is nullable: the condition renders as
// "referred_by IS NULL" when the field is null, otherwise "referred_by = ?"
// with the value bound.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Count_UserCredit_By_ReferredBy(ctx context.Context,
	user_credit_referred_by UserCredit_ReferredBy_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	// Condition starts in Null mode; flipped below if a value is present.
	var __cond_0 = &__sqlbundle_Condition{Left: "user_credits.referred_by", Equal: true, Right: "?", Null: true}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT COUNT(*) FROM user_credits WHERE "), __cond_0}}

	var __values []interface{}
	// Generator artifact: appending nothing is a deliberate no-op placeholder.
	__values = append(__values)

	if !user_credit_referred_by.isnull() {
		__cond_0.Null = false
		__values = append(__values, user_credit_referred_by.value())
	}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&count)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Get_BucketMetainfo_By_ProjectId_And_Name fetches the bucket_metainfos row
// (all fifteen columns, including the default redundancy/encryption settings)
// matching both project id and bucket name.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	bucket_metainfo *BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_metainfo = &BucketMetainfo{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
	if err != nil {
		return (*BucketMetainfo)(nil), obj.makeErr(err)
	}
	return bucket_metainfo, nil

}
|
|
|
|
// Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name
// pages through a project's buckets whose name is >= the cursor (inclusive
// variant; see the _Greater_ sibling for exclusive), ordered by name, using
// SQL LIMIT/OFFSET appended as the last two bind values.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())

	// limit and offset bind the trailing "LIMIT ? OFFSET ?" placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_metainfo := &BucketMetainfo{}
		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_metainfo)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name pages
// through a project's buckets whose name is strictly > the cursor (exclusive
// variant of the _GreaterOrEqual_ sibling), ordered by name, with
// LIMIT/OFFSET bound as the last two values.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value())

	// limit and offset bind the trailing "LIMIT ? OFFSET ?" placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_metainfo := &BucketMetainfo{}
		err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_metainfo)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_GracefulExitProgress_By_NodeId fetches the graceful_exit_progress row
// for a node; no-match becomes a wrapped sql.ErrNoRows via obj.makeErr.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
	graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
	graceful_exit_progress *GracefulExitProgress, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_progress.node_id, graceful_exit_progress.bytes_transferred, graceful_exit_progress.pieces_transferred, graceful_exit_progress.pieces_failed, graceful_exit_progress.updated_at FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_progress_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	graceful_exit_progress = &GracefulExitProgress{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&graceful_exit_progress.NodeId, &graceful_exit_progress.BytesTransferred, &graceful_exit_progress.PiecesTransferred, &graceful_exit_progress.PiecesFailed, &graceful_exit_progress.UpdatedAt)
	if err != nil {
		return (*GracefulExitProgress)(nil), obj.makeErr(err)
	}
	return graceful_exit_progress, nil

}
|
|
|
|
// Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum fetches the
// single graceful_exit_transfer_queue row identified by the composite key
// (node_id, path, piece_num).
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
	graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_transfer_queue.node_id, graceful_exit_transfer_queue.path, graceful_exit_transfer_queue.piece_num, graceful_exit_transfer_queue.root_piece_id, graceful_exit_transfer_queue.durability_ratio, graceful_exit_transfer_queue.queued_at, graceful_exit_transfer_queue.requested_at, graceful_exit_transfer_queue.last_failed_at, graceful_exit_transfer_queue.last_failed_code, graceful_exit_transfer_queue.failed_count, graceful_exit_transfer_queue.finished_at, graceful_exit_transfer_queue.order_limit_send_count FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	graceful_exit_transfer_queue = &GracefulExitTransferQueue{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&graceful_exit_transfer_queue.NodeId, &graceful_exit_transfer_queue.Path, &graceful_exit_transfer_queue.PieceNum, &graceful_exit_transfer_queue.RootPieceId, &graceful_exit_transfer_queue.DurabilityRatio, &graceful_exit_transfer_queue.QueuedAt, &graceful_exit_transfer_queue.RequestedAt, &graceful_exit_transfer_queue.LastFailedAt, &graceful_exit_transfer_queue.LastFailedCode, &graceful_exit_transfer_queue.FailedCount, &graceful_exit_transfer_queue.FinishedAt, &graceful_exit_transfer_queue.OrderLimitSendCount)
	if err != nil {
		return (*GracefulExitTransferQueue)(nil), obj.makeErr(err)
	}
	return graceful_exit_transfer_queue, nil

}
|
|
|
|
// All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual
// lists a node's offline-time records in the half-open window
// (tracked_at_greater, tracked_at_less_or_equal]. No explicit ORDER BY, so
// result order is whatever the database returns.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
	nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
	nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
	nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
	rows []*NodesOfflineTime, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds FROM nodes_offline_times WHERE nodes_offline_times.node_id = ? AND nodes_offline_times.tracked_at > ? AND nodes_offline_times.tracked_at <= ?")

	var __values []interface{}
	__values = append(__values, nodes_offline_time_node_id.value(), nodes_offline_time_tracked_at_greater.value(), nodes_offline_time_tracked_at_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		nodes_offline_time := &NodesOfflineTime{}
		err = __rows.Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, nodes_offline_time)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_StripeCustomer_CustomerId_By_UserId projects only the customer_id
// column of the stripe_customers row for the given user_id, wrapped in a
// CustomerId_Row.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
	stripe_customer_user_id StripeCustomer_UserId_Field) (
	row *CustomerId_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.customer_id FROM stripe_customers WHERE stripe_customers.user_id = ?")

	var __values []interface{}
	__values = append(__values, stripe_customer_user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &CustomerId_Row{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.CustomerId)
	if err != nil {
		return (*CustomerId_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt
// pages through stripe_customers created at or before the given time, newest
// first, with LIMIT/OFFSET bound as the last two values.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
	stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
	limit int, offset int64) (
	rows []*StripeCustomer, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at FROM stripe_customers WHERE stripe_customers.created_at <= ? ORDER BY stripe_customers.created_at DESC LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, stripe_customer_created_at_less_or_equal.value())

	// limit and offset bind the trailing "LIMIT ? OFFSET ?" placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		stripe_customer := &StripeCustomer{}
		err = __rows.Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, stripe_customer)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt lists every
// coinpayments transaction for a user, newest first. An empty result set
// yields a nil slice and nil error.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
	coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
	rows []*CoinpaymentsTransaction, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE coinpayments_transactions.user_id = ? ORDER BY coinpayments_transactions.created_at DESC")

	var __values []interface{}
	__values = append(__values, coinpayments_transaction_user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coinpayments_transaction := &CoinpaymentsTransaction{}
		err = __rows.Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coinpayments_transaction)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt
// pages through coinpayments transactions with the given status created at or
// before the cutoff, newest first, with LIMIT/OFFSET bound as the last two
// values.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
	coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
	coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
	limit int, offset int64) (
	rows []*CoinpaymentsTransaction, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE coinpayments_transactions.created_at <= ? AND coinpayments_transactions.status = ? ORDER BY coinpayments_transactions.created_at DESC LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, coinpayments_transaction_created_at_less_or_equal.value(), coinpayments_transaction_status.value())

	// limit and offset bind the trailing "LIMIT ? OFFSET ?" placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coinpayments_transaction := &CoinpaymentsTransaction{}
		err = __rows.Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coinpayments_transaction)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd
// fetches the invoice project record identified by (project_id, period_start,
// period_end); no-match becomes a wrapped sql.ErrNoRows.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
	stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
	stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
	stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
	stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.project_id = ? AND stripecoinpayments_invoice_project_records.period_start = ? AND stripecoinpayments_invoice_project_records.period_end = ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_invoice_project_record_project_id.value(), stripecoinpayments_invoice_project_record_period_start.value(), stripecoinpayments_invoice_project_record_period_end.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
	if err != nil {
		return (*StripecoinpaymentsInvoiceProjectRecord)(nil), obj.makeErr(err)
	}
	return stripecoinpayments_invoice_project_record, nil

}
|
|
|
|
// Limited_StripecoinpaymentsInvoiceProjectRecord_By_CreatedAt_LessOrEqual_And_State_OrderBy_Desc_CreatedAt
// pages through invoice project records in the given state created at or
// before the cutoff, newest first, with LIMIT/OFFSET bound as the last two
// values.
// NOTE: dbx-generated — do not hand-edit logic; comments only.
func (obj *cockroachImpl) Limited_StripecoinpaymentsInvoiceProjectRecord_By_CreatedAt_LessOrEqual_And_State_OrderBy_Desc_CreatedAt(ctx context.Context,
	stripecoinpayments_invoice_project_record_created_at_less_or_equal StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field,
	stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
	limit int, offset int64) (
	rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.created_at <= ? AND stripecoinpayments_invoice_project_records.state = ? ORDER BY stripecoinpayments_invoice_project_records.created_at DESC LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_invoice_project_record_created_at_less_or_equal.value(), stripecoinpayments_invoice_project_record_state.value())

	// limit and offset bind the trailing "LIMIT ? OFFSET ?" placeholders.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// ctx intentionally not passed to the driver (lib/pq cancellation revert).
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		stripecoinpayments_invoice_project_record := &StripecoinpaymentsInvoiceProjectRecord{}
		err = __rows.Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, stripecoinpayments_invoice_project_record)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_StripecoinpaymentsTxConversionRate_By_TxId fetches the single
// conversion-rate row for the given transaction id. A missing row surfaces
// as a wrapped sql.ErrNoRows. Generated by dbx; do not edit by hand.
func (obj *cockroachImpl) Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
	stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
	stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at FROM stripecoinpayments_tx_conversion_rates WHERE stripecoinpayments_tx_conversion_rates.tx_id = ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_tx_conversion_rate_tx_id.value())

	// Render the ?-placeholder statement for this driver's dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
	// NOTE(review): QueryRow (not QueryRowContext) — ctx is used for
	// monitoring only, not for cancellation.
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
	if err != nil {
		return (*StripecoinpaymentsTxConversionRate)(nil), obj.makeErr(err)
	}
	return stripecoinpayments_tx_conversion_rate, nil

}
|
|
|
|
// Get_Coupon_By_Id fetches the single coupon row with the given id. A
// missing row surfaces as a wrapped sql.ErrNoRows. Generated by dbx; do not
// edit by hand.
func (obj *cockroachImpl) Get_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.id = ?")

	var __values []interface{}
	__values = append(__values, coupon_id.value())

	// Render the ?-placeholder statement for this driver's dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon = &Coupon{}
	// NOTE(review): QueryRow (not QueryRowContext) — ctx is used for
	// monitoring only, not for cancellation.
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
	if err != nil {
		return (*Coupon)(nil), obj.makeErr(err)
	}
	return coupon, nil

}
|
|
|
|
// All_Coupon_By_ProjectId_And_Status_Equal_Number_OrderBy_Desc_CreatedAt
// returns all coupons for the given project whose status equals the literal
// 0 (baked into the SQL, not a parameter), newest first. Generated by dbx;
// do not edit by hand.
func (obj *cockroachImpl) All_Coupon_By_ProjectId_And_Status_Equal_Number_OrderBy_Desc_CreatedAt(ctx context.Context,
	coupon_project_id Coupon_ProjectId_Field) (
	rows []*Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	// Note the hard-coded "coupons.status = 0" predicate.
	var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.project_id = ? AND coupons.status = 0 ORDER BY coupons.created_at DESC")

	var __values []interface{}
	__values = append(__values, coupon_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Query (not QueryContext) — ctx is monitoring-only here.
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coupon := &Coupon{}
		err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coupon)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Coupon_By_UserId_OrderBy_Desc_CreatedAt returns every coupon belonging
// to the given user, newest first. Generated by dbx; do not edit by hand.
func (obj *cockroachImpl) All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
	coupon_user_id Coupon_UserId_Field) (
	rows []*Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.user_id = ? ORDER BY coupons.created_at DESC")

	var __values []interface{}
	__values = append(__values, coupon_user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Query (not QueryContext) — ctx is monitoring-only here.
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coupon := &Coupon{}
		err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coupon)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt returns every
// coupon for the given user with the given status (passed as a parameter,
// unlike the *_Equal_Number variants), newest first. Generated by dbx; do
// not edit by hand.
func (obj *cockroachImpl) All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
	coupon_user_id Coupon_UserId_Field,
	coupon_status Coupon_Status_Field) (
	rows []*Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.user_id = ? AND coupons.status = ? ORDER BY coupons.created_at DESC")

	var __values []interface{}
	__values = append(__values, coupon_user_id.value(), coupon_status.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Query (not QueryContext) — ctx is monitoring-only here.
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coupon := &Coupon{}
		err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coupon)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_Coupon_By_Status_OrderBy_Desc_CreatedAt returns every coupon with the
// given status across all users/projects, newest first. Generated by dbx; do
// not edit by hand.
func (obj *cockroachImpl) All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
	coupon_status Coupon_Status_Field) (
	rows []*Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.status = ? ORDER BY coupons.created_at DESC")

	var __values []interface{}
	__values = append(__values, coupon_status.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Query (not QueryContext) — ctx is monitoring-only here.
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coupon := &Coupon{}
		err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coupon)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt
// returns a page of coupons created at or before the given time with the
// given status, newest first, bounded by limit/offset. Generated by dbx; do
// not edit by hand.
func (obj *cockroachImpl) Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
	coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
	coupon_status Coupon_Status_Field,
	limit int, offset int64) (
	rows []*Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at FROM coupons WHERE coupons.created_at <= ? AND coupons.status = ? ORDER BY coupons.created_at DESC LIMIT ? OFFSET ?")

	// Filter values first, then the LIMIT/OFFSET values, in placeholder order.
	var __values []interface{}
	__values = append(__values, coupon_created_at_less_or_equal.value(), coupon_status.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Query (not QueryContext) — ctx is monitoring-only here.
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coupon := &Coupon{}
		err = __rows.Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coupon)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_CouponUsage_By_Period_LessOrEqual_And_Status_Equal_Number_OrderBy_Desc_Period
// returns a page of coupon usages with period at or before the given value
// and status equal to the literal 0 (baked into the SQL, not a parameter),
// most recent period first, bounded by limit/offset. Generated by dbx; do
// not edit by hand.
func (obj *cockroachImpl) Limited_CouponUsage_By_Period_LessOrEqual_And_Status_Equal_Number_OrderBy_Desc_Period(ctx context.Context,
	coupon_usage_period_less_or_equal CouponUsage_Period_Field,
	limit int, offset int64) (
	rows []*CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)

	// Note the hard-coded "coupon_usages.status = 0" predicate.
	var __embed_stmt = __sqlbundle_Literal("SELECT coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period FROM coupon_usages WHERE coupon_usages.period <= ? AND coupon_usages.status = 0 ORDER BY coupon_usages.period DESC LIMIT ? OFFSET ?")

	// Filter value first, then the LIMIT/OFFSET values, in placeholder order.
	var __values []interface{}
	__values = append(__values, coupon_usage_period_less_or_equal.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Query (not QueryContext) — ctx is monitoring-only here.
	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		coupon_usage := &CouponUsage{}
		err = __rows.Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, coupon_usage)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Update_PendingAudits_By_NodeId updates the pending_audits row for the
// given node id and returns the updated row via RETURNING. If no row matches
// it returns (nil, nil) — callers must check for a nil result, not an error.
// Returns an emptyUpdate error when no update field is set. Generated by
// dbx; do not edit by hand.
func (obj *cockroachImpl) Update_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field,
	update PendingAudits_Update_Fields) (
	pending_audits *PendingAudits, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement filled with the SET clauses below.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE pending_audits SET "), __sets, __sqlbundle_Literal(" WHERE pending_audits.node_id = ? RETURNING pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the UPDATE.
	if update.ReverifyCount._set {
		__values = append(__values, update.ReverifyCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("reverify_count = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE clause arguments come after all SET values.
	__args = append(__args, pending_audits_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	pending_audits = &PendingAudits{}
	// NOTE(review): QueryRow (not QueryRowContext) — ctx is monitoring-only.
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
	// No matching row is not treated as an error here.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return pending_audits, nil
}
|
|
|
|
// UpdateNoReturn_Irreparabledb_By_Segmentpath updates the irreparabledbs row
// for the given segment path without reading the row back. Returns an
// emptyUpdate error when no update field is set; updating a non-existent row
// is not an error (Exec result is ignored). Generated by dbx; do not edit by
// hand.
func (obj *cockroachImpl) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	update Irreparabledb_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement filled with the SET clauses below.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the UPDATE.
	if update.Segmentdetail._set {
		__values = append(__values, update.Segmentdetail.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
	}

	if update.PiecesLostCount._set {
		__values = append(__values, update.PiecesLostCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
	}

	if update.SegDamagedUnixSec._set {
		__values = append(__values, update.SegDamagedUnixSec.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
	}

	if update.RepairAttemptCount._set {
		__values = append(__values, update.RepairAttemptCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	// WHERE clause arguments come after all SET values.
	__args = append(__args, irreparabledb_segmentpath.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Exec (not ExecContext) — ctx is monitoring-only.
	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// UpdateNoReturn_AccountingTimestamps_By_Name updates the
// accounting_timestamps row with the given name without reading the row
// back. Returns an emptyUpdate error when no update field is set; updating a
// non-existent row is not an error (Exec result is ignored). Generated by
// dbx; do not edit by hand.
func (obj *cockroachImpl) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	update AccountingTimestamps_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement filled with the SET clauses below.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the UPDATE.
	if update.Value._set {
		__values = append(__values, update.Value.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	// WHERE clause arguments come after all SET values.
	__args = append(__args, accounting_timestamps_name.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Exec (not ExecContext) — ctx is monitoring-only.
	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_Node_By_Id updates the nodes row with the given id and returns the
// updated row via RETURNING. updated_at is always set to obj.db.Hooks.Now()
// (UTC), so the SET list is never empty and there is no emptyUpdate path.
// If no row matches it returns (nil, nil) — callers must check for a nil
// result, not an error. Generated by dbx; do not edit by hand.
func (obj *cockroachImpl) Update_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	node *Node, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement filled with the SET clauses below.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the UPDATE.
	if update.Address._set {
		__values = append(__values, update.Address.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
	}

	if update.LastNet._set {
		__values = append(__values, update.LastNet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
	}

	if update.Protocol._set {
		__values = append(__values, update.Protocol.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
	}

	if update.Type._set {
		__values = append(__values, update.Type.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
	}

	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.Wallet._set {
		__values = append(__values, update.Wallet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
	}

	if update.FreeBandwidth._set {
		__values = append(__values, update.FreeBandwidth.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?"))
	}

	if update.FreeDisk._set {
		__values = append(__values, update.FreeDisk.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
	}

	if update.PieceCount._set {
		__values = append(__values, update.PieceCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
	}

	if update.Major._set {
		__values = append(__values, update.Major.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
	}

	if update.Minor._set {
		__values = append(__values, update.Minor.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
	}

	if update.Patch._set {
		__values = append(__values, update.Patch.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
	}

	if update.Hash._set {
		__values = append(__values, update.Hash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
	}

	if update.Timestamp._set {
		__values = append(__values, update.Timestamp.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
	}

	if update.Release._set {
		__values = append(__values, update.Release.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
	}

	if update.Latency90._set {
		__values = append(__values, update.Latency90.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
	}

	if update.AuditSuccessCount._set {
		__values = append(__values, update.AuditSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
	}

	if update.TotalAuditCount._set {
		__values = append(__values, update.TotalAuditCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
	}

	if update.UptimeSuccessCount._set {
		__values = append(__values, update.UptimeSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
	}

	if update.TotalUptimeCount._set {
		__values = append(__values, update.TotalUptimeCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
	}

	if update.LastContactSuccess._set {
		__values = append(__values, update.LastContactSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
	}

	if update.LastContactFailure._set {
		__values = append(__values, update.LastContactFailure.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
	}

	if update.Contained._set {
		__values = append(__values, update.Contained.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
	}

	if update.Disqualified._set {
		__values = append(__values, update.Disqualified.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
	}

	if update.AuditReputationAlpha._set {
		__values = append(__values, update.AuditReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
	}

	if update.AuditReputationBeta._set {
		__values = append(__values, update.AuditReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
	}

	if update.UptimeReputationAlpha._set {
		__values = append(__values, update.UptimeReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_alpha = ?"))
	}

	if update.UptimeReputationBeta._set {
		__values = append(__values, update.UptimeReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_beta = ?"))
	}

	if update.ExitInitiatedAt._set {
		__values = append(__values, update.ExitInitiatedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
	}

	if update.ExitLoopCompletedAt._set {
		__values = append(__values, update.ExitLoopCompletedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
	}

	if update.ExitFinishedAt._set {
		__values = append(__values, update.ExitFinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
	}

	if update.ExitSuccess._set {
		__values = append(__values, update.ExitSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
	}

	// updated_at is maintained automatically on every update, which also
	// guarantees the SET clause is never empty.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	// WHERE clause arguments come after all SET values.
	__args = append(__args, node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	// NOTE(review): QueryRow (not QueryRowContext) — ctx is monitoring-only.
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
	// No matching row is not treated as an error here.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil
}
|
|
|
|
// UpdateNoReturn_Node_By_Id updates the nodes row with the given id without
// reading the row back. updated_at is always set to obj.db.Hooks.Now()
// (UTC), so the SET list is never empty and there is no emptyUpdate path.
// Updating a non-existent row is not an error (Exec result is ignored).
// Generated by dbx; do not edit by hand.
func (obj *cockroachImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement filled with the SET clauses below.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the UPDATE.
	if update.Address._set {
		__values = append(__values, update.Address.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
	}

	if update.LastNet._set {
		__values = append(__values, update.LastNet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
	}

	if update.Protocol._set {
		__values = append(__values, update.Protocol.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
	}

	if update.Type._set {
		__values = append(__values, update.Type.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
	}

	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.Wallet._set {
		__values = append(__values, update.Wallet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
	}

	if update.FreeBandwidth._set {
		__values = append(__values, update.FreeBandwidth.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?"))
	}

	if update.FreeDisk._set {
		__values = append(__values, update.FreeDisk.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
	}

	if update.PieceCount._set {
		__values = append(__values, update.PieceCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
	}

	if update.Major._set {
		__values = append(__values, update.Major.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
	}

	if update.Minor._set {
		__values = append(__values, update.Minor.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
	}

	if update.Patch._set {
		__values = append(__values, update.Patch.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
	}

	if update.Hash._set {
		__values = append(__values, update.Hash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
	}

	if update.Timestamp._set {
		__values = append(__values, update.Timestamp.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
	}

	if update.Release._set {
		__values = append(__values, update.Release.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
	}

	if update.Latency90._set {
		__values = append(__values, update.Latency90.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
	}

	if update.AuditSuccessCount._set {
		__values = append(__values, update.AuditSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
	}

	if update.TotalAuditCount._set {
		__values = append(__values, update.TotalAuditCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
	}

	if update.UptimeSuccessCount._set {
		__values = append(__values, update.UptimeSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
	}

	if update.TotalUptimeCount._set {
		__values = append(__values, update.TotalUptimeCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
	}

	if update.LastContactSuccess._set {
		__values = append(__values, update.LastContactSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
	}

	if update.LastContactFailure._set {
		__values = append(__values, update.LastContactFailure.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
	}

	if update.Contained._set {
		__values = append(__values, update.Contained.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
	}

	if update.Disqualified._set {
		__values = append(__values, update.Disqualified.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
	}

	if update.AuditReputationAlpha._set {
		__values = append(__values, update.AuditReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
	}

	if update.AuditReputationBeta._set {
		__values = append(__values, update.AuditReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
	}

	if update.UptimeReputationAlpha._set {
		__values = append(__values, update.UptimeReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_alpha = ?"))
	}

	if update.UptimeReputationBeta._set {
		__values = append(__values, update.UptimeReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_reputation_beta = ?"))
	}

	if update.ExitInitiatedAt._set {
		__values = append(__values, update.ExitInitiatedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
	}

	if update.ExitLoopCompletedAt._set {
		__values = append(__values, update.ExitLoopCompletedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
	}

	if update.ExitFinishedAt._set {
		__values = append(__values, update.ExitFinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
	}

	if update.ExitSuccess._set {
		__values = append(__values, update.ExitSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
	}

	// updated_at is maintained automatically on every update, which also
	// guarantees the SET clause is never empty.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	// WHERE clause arguments come after all SET values.
	__args = append(__args, node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// NOTE(review): Exec (not ExecContext) — ctx is monitoring-only.
	_, err = obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
func (obj *cockroachImpl) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.NormalizedEmail._set {
|
|
__values = append(__values, update.NormalizedEmail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("normalized_email = ?"))
|
|
}
|
|
|
|
if update.FullName._set {
|
|
__values = append(__values, update.FullName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("full_name = ?"))
|
|
}
|
|
|
|
if update.ShortName._set {
|
|
__values = append(__values, update.ShortName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("short_name = ?"))
|
|
}
|
|
|
|
if update.PasswordHash._set {
|
|
__values = append(__values, update.PasswordHash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?"))
|
|
}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, user_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
user = &User{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Description._set {
|
|
__values = append(__values, update.Description.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
|
|
}
|
|
|
|
if update.UsageLimit._set {
|
|
__values = append(__values, update.UsageLimit.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, project_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Name._set {
|
|
__values = append(__values, update.Name.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, api_key_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
update PeerIdentity_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.LeafSerialNumber._set {
|
|
__values = append(__values, update.LeafSerialNumber.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?"))
|
|
}
|
|
|
|
if update.Chain._set {
|
|
__values = append(__values, update.Chain.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, peer_identity_node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ? RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.OwnerId._set {
|
|
__values = append(__values, update.OwnerId.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, registration_token_secret.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) UpdateNoReturn_Offer_By_Id(ctx context.Context,
|
|
offer_id Offer_Id_Field,
|
|
update Offer_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE offers SET "), __sets, __sqlbundle_Literal(" WHERE offers.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Name._set {
|
|
__values = append(__values, update.Name.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
|
|
}
|
|
|
|
if update.Description._set {
|
|
__values = append(__values, update.Description.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
|
|
}
|
|
|
|
if update.AwardCreditInCents._set {
|
|
__values = append(__values, update.AwardCreditInCents.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("award_credit_in_cents = ?"))
|
|
}
|
|
|
|
if update.InviteeCreditInCents._set {
|
|
__values = append(__values, update.InviteeCreditInCents.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("invitee_credit_in_cents = ?"))
|
|
}
|
|
|
|
if update.AwardCreditDurationDays._set {
|
|
__values = append(__values, update.AwardCreditDurationDays.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("award_credit_duration_days = ?"))
|
|
}
|
|
|
|
if update.InviteeCreditDurationDays._set {
|
|
__values = append(__values, update.InviteeCreditDurationDays.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("invitee_credit_duration_days = ?"))
|
|
}
|
|
|
|
if update.RedeemableCap._set {
|
|
__values = append(__values, update.RedeemableCap.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("redeemable_cap = ?"))
|
|
}
|
|
|
|
if update.ExpiresAt._set {
|
|
__values = append(__values, update.ExpiresAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("expires_at = ?"))
|
|
}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if update.Type._set {
|
|
__values = append(__values, update.Type.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, offer_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
update BucketMetainfo_Update_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.PartnerId._set {
|
|
__values = append(__values, update.PartnerId.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?"))
|
|
}
|
|
|
|
if update.DefaultSegmentSize._set {
|
|
__values = append(__values, update.DefaultSegmentSize.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?"))
|
|
}
|
|
|
|
if update.DefaultEncryptionCipherSuite._set {
|
|
__values = append(__values, update.DefaultEncryptionCipherSuite.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?"))
|
|
}
|
|
|
|
if update.DefaultEncryptionBlockSize._set {
|
|
__values = append(__values, update.DefaultEncryptionBlockSize.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?"))
|
|
}
|
|
|
|
if update.DefaultRedundancyAlgorithm._set {
|
|
__values = append(__values, update.DefaultRedundancyAlgorithm.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?"))
|
|
}
|
|
|
|
if update.DefaultRedundancyShareSize._set {
|
|
__values = append(__values, update.DefaultRedundancyShareSize.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?"))
|
|
}
|
|
|
|
if update.DefaultRedundancyRequiredShares._set {
|
|
__values = append(__values, update.DefaultRedundancyRequiredShares.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?"))
|
|
}
|
|
|
|
if update.DefaultRedundancyRepairShares._set {
|
|
__values = append(__values, update.DefaultRedundancyRepairShares.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?"))
|
|
}
|
|
|
|
if update.DefaultRedundancyOptimalShares._set {
|
|
__values = append(__values, update.DefaultRedundancyOptimalShares.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?"))
|
|
}
|
|
|
|
if update.DefaultRedundancyTotalShares._set {
|
|
__values = append(__values, update.DefaultRedundancyTotalShares.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
bucket_metainfo = &BucketMetainfo{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bucket_metainfo, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) UpdateNoReturn_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
|
|
update GracefulExitProgress_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_progress SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_progress.node_id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.BytesTransferred._set {
|
|
__values = append(__values, update.BytesTransferred.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("bytes_transferred = ?"))
|
|
}
|
|
|
|
if update.PiecesTransferred._set {
|
|
__values = append(__values, update.PiecesTransferred.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_transferred = ?"))
|
|
}
|
|
|
|
if update.PiecesFailed._set {
|
|
__values = append(__values, update.PiecesFailed.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_failed = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now.UTC())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, graceful_exit_progress_node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
update GracefulExitTransferQueue_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_transfer_queue SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.DurabilityRatio._set {
|
|
__values = append(__values, update.DurabilityRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("durability_ratio = ?"))
|
|
}
|
|
|
|
if update.RequestedAt._set {
|
|
__values = append(__values, update.RequestedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("requested_at = ?"))
|
|
}
|
|
|
|
if update.LastFailedAt._set {
|
|
__values = append(__values, update.LastFailedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_at = ?"))
|
|
}
|
|
|
|
if update.LastFailedCode._set {
|
|
__values = append(__values, update.LastFailedCode.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_code = ?"))
|
|
}
|
|
|
|
if update.FailedCount._set {
|
|
__values = append(__values, update.FailedCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("failed_count = ?"))
|
|
}
|
|
|
|
if update.FinishedAt._set {
|
|
__values = append(__values, update.FinishedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("finished_at = ?"))
|
|
}
|
|
|
|
if update.OrderLimitSendCount._set {
|
|
__values = append(__values, update.OrderLimitSendCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("order_limit_send_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
update CoinpaymentsTransaction_Update_Fields) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coinpayments_transactions SET "), __sets, __sqlbundle_Literal(" WHERE coinpayments_transactions.id = ? RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Received._set {
|
|
__values = append(__values, update.Received.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("received = ?"))
|
|
}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, coinpayments_transaction_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coinpayments_transaction = &CoinpaymentsTransaction{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coinpayments_transaction, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
|
|
update StripecoinpaymentsApplyBalanceIntent_Update_Fields) (
|
|
stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE stripecoinpayments_apply_balance_intents SET "), __sets, __sqlbundle_Literal(" WHERE stripecoinpayments_apply_balance_intents.tx_id = ? RETURNING stripecoinpayments_apply_balance_intents.tx_id, stripecoinpayments_apply_balance_intents.state, stripecoinpayments_apply_balance_intents.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.State._set {
|
|
__values = append(__values, update.State.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("state = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, stripecoinpayments_apply_balance_intent_tx_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_apply_balance_intent = &StripecoinpaymentsApplyBalanceIntent{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_apply_balance_intent.TxId, &stripecoinpayments_apply_balance_intent.State, &stripecoinpayments_apply_balance_intent.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_apply_balance_intent, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE stripecoinpayments_invoice_project_records SET "), __sets, __sqlbundle_Literal(" WHERE stripecoinpayments_invoice_project_records.id = ? RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.State._set {
|
|
__values = append(__values, update.State.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("state = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, stripecoinpayments_invoice_project_record_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
update Coupon_Update_Fields) (
|
|
coupon *Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupons SET "), __sets, __sqlbundle_Literal(" WHERE coupons.id = ? RETURNING coupons.id, coupons.project_id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, coupon_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon = &Coupon{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&coupon.Id, &coupon.ProjectId, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coupon, nil
|
|
}
|
|
|
|
func (obj *cockroachImpl) Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
update CouponUsage_Update_Fields) (
|
|
coupon_usage *CouponUsage, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupon_usages SET "), __sets, __sqlbundle_Literal(" WHERE coupon_usages.coupon_id = ? AND coupon_usages.period = ? RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, coupon_usage_coupon_id.value(), coupon_usage_period.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon_usage = &CouponUsage{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coupon_usage, nil
|
|
}
|
|
|
|
// Delete_ValueAttribution_By_ProjectId_And_BucketName deletes the
// value_attributions row matching (project_id, bucket_name) and reports
// whether a row was actually removed. ctx is used only for monitoring;
// the statement itself is executed without context cancellation.
func (obj *cockroachImpl) Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")

	var __values []interface{}
	__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())

	// Rewrite generic ?-placeholders for the cockroach dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_PendingAudits_By_NodeId deletes the pending_audits row for the
// given node id and reports whether a row was removed.
func (obj *cockroachImpl) Delete_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?")

	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_Irreparabledb_By_Segmentpath deletes the irreparabledbs row with
// the given segment path and reports whether a row was removed.
func (obj *cockroachImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_AccountingRollup_By_Id deletes the accounting_rollups row with
// the given id and reports whether a row was removed.
func (obj *cockroachImpl) Delete_AccountingRollup_By_Id(ctx context.Context,
	accounting_rollup_id AccountingRollup_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?")

	var __values []interface{}
	__values = append(__values, accounting_rollup_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_Node_By_Id deletes the nodes row with the given id and reports
// whether a row was removed.
func (obj *cockroachImpl) Delete_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_User_By_Id deletes the users row with the given id and reports
// whether a row was removed.
func (obj *cockroachImpl) Delete_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_Project_By_Id deletes the projects row with the given id and
// reports whether a row was removed.
func (obj *cockroachImpl) Delete_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_ProjectMember_By_MemberId_And_ProjectId deletes the
// project_members row matching (member_id, project_id) and reports
// whether a row was removed.
func (obj *cockroachImpl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? AND project_members.project_id = ?")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value(), project_member_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_ApiKey_By_Id deletes the api_keys row with the given id and
// reports whether a row was removed.
func (obj *cockroachImpl) Delete_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?")

	var __values []interface{}
	__values = append(__values, api_key_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_SerialNumber_By_ExpiresAt_LessOrEqual deletes every
// serial_numbers row whose expires_at is at or before the given time,
// returning the number of rows removed.
func (obj *cockroachImpl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
	serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM serial_numbers WHERE serial_numbers.expires_at <= ?")

	var __values []interface{}
	__values = append(__values, serial_number_expires_at_less_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil
}
|
|
|
|
// Delete_StoragenodeStorageTally_By_Id deletes the
// storagenode_storage_tallies row with the given id and reports whether
// a row was removed.
func (obj *cockroachImpl) Delete_StoragenodeStorageTally_By_Id(ctx context.Context,
	storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.id = ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_ResetPasswordToken_By_Secret deletes the reset_password_tokens
// row with the given secret and reports whether a row was removed.
func (obj *cockroachImpl) Delete_ResetPasswordToken_By_Secret(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_BucketMetainfo_By_ProjectId_And_Name deletes the
// bucket_metainfos row matching (project_id, name) and reports whether a
// row was removed.
func (obj *cockroachImpl) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_GracefulExitProgress_By_NodeId deletes the
// graceful_exit_progress row for the given node id and reports whether a
// row was removed.
func (obj *cockroachImpl) Delete_GracefulExitProgress_By_NodeId(ctx context.Context,
	graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_progress_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId deletes every
// graceful_exit_transfer_queue row for the given node id, returning the
// number of rows removed.
func (obj *cockroachImpl) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil
}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum
// deletes the graceful_exit_transfer_queue row matching
// (node_id, path, piece_num) and reports whether a row was removed.
func (obj *cockroachImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null
// deletes every finished (finished_at IS NOT NULL) transfer-queue row for
// the given node id, returning the number of rows removed.
func (obj *cockroachImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.finished_at is not NULL")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil
}
|
|
|
|
// Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId deletes the
// stripecoinpayments_apply_balance_intents row with the given tx id and
// reports whether a row was removed.
func (obj *cockroachImpl) Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
	stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM stripecoinpayments_apply_balance_intents WHERE stripecoinpayments_apply_balance_intents.tx_id = ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_apply_balance_intent_tx_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id deletes the
// stripecoinpayments_invoice_project_records row with the given id and
// reports whether a row was removed.
func (obj *cockroachImpl) Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.id = ?")

	var __values []interface{}
	__values = append(__values, stripecoinpayments_invoice_project_record_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
// Delete_Coupon_By_Id deletes the coupons row with the given id and
// reports whether a row was removed.
func (obj *cockroachImpl) Delete_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM coupons WHERE coupons.id = ?")

	var __values []interface{}
	__values = append(__values, coupon_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil
}
|
|
|
|
func (impl cockroachImpl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(*pq.Error); ok {
|
|
if e.Code.Class() == "23" {
|
|
return e.Constraint, true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *cockroachImpl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.Exec("DELETE FROM user_credits;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM used_serials;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM stripecoinpayments_apply_balance_intents;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM project_members;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM project_invoice_stamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_metainfos;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM api_keys;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM value_attributions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM users;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM stripecoinpayments_tx_conversion_rates;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM stripecoinpayments_invoice_project_records;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM stripe_customers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM storagenode_storage_tallies;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM storagenode_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM serial_numbers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM reset_password_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM registration_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM projects;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM pending_audits;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM peer_identities;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM offers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM nodes_offline_times;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM injuredsegments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM graceful_exit_transfer_queue;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM graceful_exit_progress;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM coupon_usages;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM coupons;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM coinpayments_transactions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_storage_tallies;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
// Rx bundles a DB with a lazily opened transaction so that a sequence of
// calls can share a single transaction (see getTx/Commit/Rollback).
type Rx struct {
	db *DB // handle used to open the transaction on first use
	tx *Tx // open transaction; nil until getTx is first called
}
|
|
|
|
func (rx *Rx) UnsafeTx(ctx context.Context) (unsafe_tx *sql.Tx, err error) {
|
|
tx, err := rx.getTx(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return tx.Tx, nil
|
|
}
|
|
|
|
func (rx *Rx) getTx(ctx context.Context) (tx *Tx, err error) {
|
|
if rx.tx == nil {
|
|
if rx.tx, err = rx.db.Open(ctx); err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
return rx.tx, nil
|
|
}
|
|
|
|
// Rebind rewrites the query's placeholders for the underlying DB's
// dialect by delegating to the owning DB.
func (rx *Rx) Rebind(s string) string {
	return rx.db.Rebind(s)
}
|
|
|
|
func (rx *Rx) Commit() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Commit()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) Rollback() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Rollback()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
|
|
rows []*AccountingRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx, accounting_rollup_start_time_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
rows []*ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx, api_key_project_id)
|
|
}
|
|
|
|
func (rx *Rx) All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_BucketStorageTally(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
|
rows []*BucketStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id, bucket_storage_tally_bucket_name, bucket_storage_tally_interval_start_greater_or_equal, bucket_storage_tally_interval_start_less_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx, coinpayments_transaction_user_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_ProjectId_And_Status_Equal_Number_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_project_id Coupon_ProjectId_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_ProjectId_And_Status_Equal_Number_OrderBy_Desc_CreatedAt(ctx, coupon_project_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx, coupon_status)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx, coupon_user_id, coupon_status)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx, coupon_user_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Node_Id(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
|
|
node_last_contact_success_less Node_LastContactSuccess_Field) (
|
|
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx, node_last_contact_success_less)
|
|
}
|
|
|
|
func (rx *Rx) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
|
|
rows []*Id_PieceCount_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
|
|
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
|
|
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
|
|
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
|
|
rows []*NodesOfflineTime, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx, nodes_offline_time_node_id, nodes_offline_time_tracked_at_greater, nodes_offline_time_tracked_at_less_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_Offer_OrderBy_Asc_Id(ctx context.Context) (
|
|
rows []*Offer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Offer_OrderBy_Asc_Id(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_Project(ctx context.Context) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field) (
|
|
rows []*ProjectInvoiceStamp, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate(ctx, project_invoice_stamp_project_id)
|
|
}
|
|
|
|
func (rx *Rx) All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_ProjectMember_By_MemberId(ctx, project_member_member_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_owner_id Project_OwnerId_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx, project_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx, project_member_member_id)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field) (
|
|
rows []*StoragenodeBandwidthRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx, storagenode_bandwidth_rollup_interval_start_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodeStorageTally(ctx context.Context) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodeStorageTally(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx, storagenode_storage_tally_interval_end_time_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx context.Context,
|
|
user_credit_user_id UserCredit_UserId_Field,
|
|
user_credit_expires_at_greater UserCredit_ExpiresAt_Field) (
|
|
rows []*UserCredit, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx, user_credit_user_id, user_credit_expires_at_greater)
|
|
}
|
|
|
|
func (rx *Rx) Count_UserCredit_By_ReferredBy(ctx context.Context,
|
|
user_credit_referred_by UserCredit_ReferredBy_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Count_UserCredit_By_ReferredBy(ctx, user_credit_referred_by)
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_AccountingRollup(ctx context.Context,
|
|
accounting_rollup_node_id AccountingRollup_NodeId_Field,
|
|
accounting_rollup_start_time AccountingRollup_StartTime_Field,
|
|
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
|
|
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
|
|
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
|
|
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
|
|
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
|
|
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_AccountingRollup(ctx, accounting_rollup_node_id, accounting_rollup_start_time, accounting_rollup_put_total, accounting_rollup_get_total, accounting_rollup_get_audit_total, accounting_rollup_get_repair_total, accounting_rollup_put_repair_total, accounting_rollup_at_rest_total)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_AccountingTimestamps(ctx, accounting_timestamps_name, accounting_timestamps_value)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_BucketStorageTally(ctx, bucket_storage_tally_bucket_name, bucket_storage_tally_project_id, bucket_storage_tally_interval_start, bucket_storage_tally_inline, bucket_storage_tally_remote, bucket_storage_tally_remote_segments_count, bucket_storage_tally_inline_segments_count, bucket_storage_tally_object_count, bucket_storage_tally_metadata_size)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_GracefulExitProgress(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
|
|
graceful_exit_progress_bytes_transferred GracefulExitProgress_BytesTransferred_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_GracefulExitProgress(ctx, graceful_exit_progress_node_id, graceful_exit_progress_bytes_transferred)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_GracefulExitTransferQueue(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
graceful_exit_transfer_queue_durability_ratio GracefulExitTransferQueue_DurabilityRatio_Field,
|
|
graceful_exit_transfer_queue_order_limit_send_count GracefulExitTransferQueue_OrderLimitSendCount_Field,
|
|
optional GracefulExitTransferQueue_Create_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_GracefulExitTransferQueue(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num, graceful_exit_transfer_queue_durability_ratio, graceful_exit_transfer_queue_order_limit_send_count, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_Irreparabledb(ctx, irreparabledb_segmentpath, irreparabledb_segmentdetail, irreparabledb_pieces_lost_count, irreparabledb_seg_damaged_unix_sec, irreparabledb_repair_attempt_count)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_address Node_Address_Field,
|
|
node_last_net Node_LastNet_Field,
|
|
node_protocol Node_Protocol_Field,
|
|
node_type Node_Type_Field,
|
|
node_email Node_Email_Field,
|
|
node_wallet Node_Wallet_Field,
|
|
node_free_bandwidth Node_FreeBandwidth_Field,
|
|
node_free_disk Node_FreeDisk_Field,
|
|
node_major Node_Major_Field,
|
|
node_minor Node_Minor_Field,
|
|
node_patch Node_Patch_Field,
|
|
node_hash Node_Hash_Field,
|
|
node_timestamp Node_Timestamp_Field,
|
|
node_release Node_Release_Field,
|
|
node_latency_90 Node_Latency90_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_last_contact_success Node_LastContactSuccess_Field,
|
|
node_last_contact_failure Node_LastContactFailure_Field,
|
|
node_contained Node_Contained_Field,
|
|
node_audit_reputation_alpha Node_AuditReputationAlpha_Field,
|
|
node_audit_reputation_beta Node_AuditReputationBeta_Field,
|
|
node_uptime_reputation_alpha Node_UptimeReputationAlpha_Field,
|
|
node_uptime_reputation_beta Node_UptimeReputationBeta_Field,
|
|
node_exit_success Node_ExitSuccess_Field,
|
|
optional Node_Create_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_Node(ctx, node_id, node_address, node_last_net, node_protocol, node_type, node_email, node_wallet, node_free_bandwidth, node_free_disk, node_major, node_minor, node_patch, node_hash, node_timestamp, node_release, node_latency_90, node_audit_success_count, node_total_audit_count, node_uptime_success_count, node_total_uptime_count, node_last_contact_success, node_last_contact_failure, node_contained, node_audit_reputation_alpha, node_audit_reputation_beta, node_uptime_reputation_alpha, node_uptime_reputation_beta, node_exit_success, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_PeerIdentity(ctx, peer_identity_node_id, peer_identity_leaf_serial_number, peer_identity_chain)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field,
|
|
serial_number_bucket_id SerialNumber_BucketId_Field,
|
|
serial_number_expires_at SerialNumber_ExpiresAt_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_SerialNumber(ctx, serial_number_serial_number, serial_number_bucket_id, serial_number_expires_at)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_StoragenodeStorageTally(ctx context.Context,
|
|
storagenode_storage_tally_node_id StoragenodeStorageTally_NodeId_Field,
|
|
storagenode_storage_tally_interval_end_time StoragenodeStorageTally_IntervalEndTime_Field,
|
|
storagenode_storage_tally_data_total StoragenodeStorageTally_DataTotal_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_StoragenodeStorageTally(ctx, storagenode_storage_tally_node_id, storagenode_storage_tally_interval_end_time, storagenode_storage_tally_data_total)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_UsedSerial(ctx context.Context,
|
|
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
|
|
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_UsedSerial(ctx, used_serial_serial_number_id, used_serial_storage_node_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_head ApiKey_Head_Field,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_secret ApiKey_Secret_Field,
|
|
optional ApiKey_Create_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ApiKey(ctx, api_key_id, api_key_project_id, api_key_head, api_key_name, api_key_secret, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_BucketMetainfo(ctx, bucket_metainfo_id, bucket_metainfo_project_id, bucket_metainfo_name, bucket_metainfo_path_cipher, bucket_metainfo_default_segment_size, bucket_metainfo_default_encryption_cipher_suite, bucket_metainfo_default_encryption_block_size, bucket_metainfo_default_redundancy_algorithm, bucket_metainfo_default_redundancy_share_size, bucket_metainfo_default_redundancy_required_shares, bucket_metainfo_default_redundancy_repair_shares, bucket_metainfo_default_redundancy_optimal_shares, bucket_metainfo_default_redundancy_total_shares, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_CoinpaymentsTransaction(ctx, coinpayments_transaction_id, coinpayments_transaction_user_id, coinpayments_transaction_address, coinpayments_transaction_amount, coinpayments_transaction_received, coinpayments_transaction_status, coinpayments_transaction_key, coinpayments_transaction_timeout)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Coupon(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
coupon_project_id Coupon_ProjectId_Field,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_amount Coupon_Amount_Field,
|
|
coupon_description Coupon_Description_Field,
|
|
coupon_type Coupon_Type_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
coupon_duration Coupon_Duration_Field) (
|
|
coupon *Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Coupon(ctx, coupon_id, coupon_project_id, coupon_user_id, coupon_amount, coupon_description, coupon_type, coupon_status, coupon_duration)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_CouponUsage(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_amount CouponUsage_Amount_Field,
|
|
coupon_usage_status CouponUsage_Status_Field,
|
|
coupon_usage_period CouponUsage_Period_Field) (
|
|
coupon_usage *CouponUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_CouponUsage(ctx, coupon_usage_coupon_id, coupon_usage_amount, coupon_usage_status, coupon_usage_period)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_NodesOfflineTime(ctx context.Context,
|
|
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
|
|
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
|
|
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
|
|
nodes_offline_time *NodesOfflineTime, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_NodesOfflineTime(ctx, nodes_offline_time_node_id, nodes_offline_time_tracked_at, nodes_offline_time_seconds)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Offer(ctx context.Context,
|
|
offer_name Offer_Name_Field,
|
|
offer_description Offer_Description_Field,
|
|
offer_award_credit_in_cents Offer_AwardCreditInCents_Field,
|
|
offer_invitee_credit_in_cents Offer_InviteeCreditInCents_Field,
|
|
offer_expires_at Offer_ExpiresAt_Field,
|
|
offer_status Offer_Status_Field,
|
|
offer_type Offer_Type_Field,
|
|
optional Offer_Create_Fields) (
|
|
offer *Offer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Offer(ctx, offer_name, offer_description, offer_award_credit_in_cents, offer_invitee_credit_in_cents, offer_expires_at, offer_status, offer_type, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_PendingAudits(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field,
|
|
pending_audits_piece_id PendingAudits_PieceId_Field,
|
|
pending_audits_stripe_index PendingAudits_StripeIndex_Field,
|
|
pending_audits_share_size PendingAudits_ShareSize_Field,
|
|
pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field,
|
|
pending_audits_reverify_count PendingAudits_ReverifyCount_Field,
|
|
pending_audits_path PendingAudits_Path_Field) (
|
|
pending_audits *PendingAudits, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_PendingAudits(ctx, pending_audits_node_id, pending_audits_piece_id, pending_audits_stripe_index, pending_audits_share_size, pending_audits_expected_share_hash, pending_audits_reverify_count, pending_audits_path)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field,
|
|
project_usage_limit Project_UsageLimit_Field,
|
|
project_owner_id Project_OwnerId_Field,
|
|
optional Project_Create_Fields) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Project(ctx, project_id, project_name, project_description, project_usage_limit, project_owner_id, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ProjectInvoiceStamp(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
|
|
project_invoice_stamp_invoice_id ProjectInvoiceStamp_InvoiceId_Field,
|
|
project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field,
|
|
project_invoice_stamp_end_date ProjectInvoiceStamp_EndDate_Field,
|
|
project_invoice_stamp_created_at ProjectInvoiceStamp_CreatedAt_Field) (
|
|
project_invoice_stamp *ProjectInvoiceStamp, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ProjectInvoiceStamp(ctx, project_invoice_stamp_project_id, project_invoice_stamp_invoice_id, project_invoice_stamp_start_date, project_invoice_stamp_end_date, project_invoice_stamp_created_at)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ProjectMember(ctx, project_member_member_id, project_member_project_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_RegistrationToken(ctx, registration_token_secret, registration_token_project_limit, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ResetPasswordToken(ctx, reset_password_token_secret, reset_password_token_owner_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripeCustomer(ctx, stripe_customer_user_id, stripe_customer_customer_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripecoinpaymentsApplyBalanceIntent(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
|
|
stripecoinpayments_apply_balance_intent_state StripecoinpaymentsApplyBalanceIntent_State_Field) (
|
|
stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripecoinpaymentsApplyBalanceIntent(ctx, stripecoinpayments_apply_balance_intent_tx_id, stripecoinpayments_apply_balance_intent_state)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripecoinpaymentsInvoiceProjectRecord(ctx, stripecoinpayments_invoice_project_record_id, stripecoinpayments_invoice_project_record_project_id, stripecoinpayments_invoice_project_record_storage, stripecoinpayments_invoice_project_record_egress, stripecoinpayments_invoice_project_record_objects, stripecoinpayments_invoice_project_record_period_start, stripecoinpayments_invoice_project_record_period_end, stripecoinpayments_invoice_project_record_state)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
|
|
stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripecoinpaymentsTxConversionRate(ctx, stripecoinpayments_tx_conversion_rate_tx_id, stripecoinpayments_tx_conversion_rate_rate)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_email User_Email_Field,
|
|
user_normalized_email User_NormalizedEmail_Field,
|
|
user_full_name User_FullName_Field,
|
|
user_password_hash User_PasswordHash_Field,
|
|
optional User_Create_Fields) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_User(ctx, user_id, user_email, user_normalized_email, user_full_name, user_password_hash, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_UserCredit(ctx context.Context,
|
|
user_credit_user_id UserCredit_UserId_Field,
|
|
user_credit_offer_id UserCredit_OfferId_Field,
|
|
user_credit_type UserCredit_Type_Field,
|
|
user_credit_credits_earned_in_cents UserCredit_CreditsEarnedInCents_Field,
|
|
user_credit_expires_at UserCredit_ExpiresAt_Field,
|
|
optional UserCredit_Create_Fields) (
|
|
user_credit *UserCredit, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_UserCredit(ctx, user_credit_user_id, user_credit_offer_id, user_credit_type, user_credit_credits_earned_in_cents, user_credit_expires_at, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ValueAttribution(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field,
|
|
value_attribution_partner_id ValueAttribution_PartnerId_Field) (
|
|
value_attribution *ValueAttribution, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ValueAttribution(ctx, value_attribution_project_id, value_attribution_bucket_name, value_attribution_partner_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_AccountingRollup_By_Id(ctx, accounting_rollup_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ApiKey_By_Id(ctx, api_key_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Coupon_By_Id(ctx, coupon_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitProgress_By_NodeId(ctx, graceful_exit_progress_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitTransferQueue_By_NodeId(ctx, graceful_exit_transfer_queue_node_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx, graceful_exit_transfer_queue_node_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_PendingAudits_By_NodeId(ctx, pending_audits_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ProjectMember_By_MemberId_And_ProjectId(ctx, project_member_member_id, project_member_project_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Project_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ResetPasswordToken_By_Secret(ctx, reset_password_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
|
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx, serial_number_expires_at_less_or_equal)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_StoragenodeStorageTally_By_Id(ctx context.Context,
|
|
storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_StoragenodeStorageTally_By_Id(ctx, storagenode_storage_tally_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx, stripecoinpayments_apply_balance_intent_tx_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx, stripecoinpayments_invoice_project_record_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_User_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx, value_attribution_project_id, value_attribution_bucket_name)
|
|
}
|
|
|
|
func (rx *Rx) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_AccountingTimestamps_Value_By_Name(ctx, accounting_timestamps_name)
|
|
}
|
|
|
|
func (rx *Rx) Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
|
|
bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
|
|
bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
|
|
bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
|
|
bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
|
|
bucket_bandwidth_rollup *BucketBandwidthRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx, bucket_bandwidth_rollup_bucket_name, bucket_bandwidth_rollup_project_id, bucket_bandwidth_rollup_interval_start, bucket_bandwidth_rollup_action)
|
|
}
|
|
|
|
func (rx *Rx) Find_SerialNumber_By_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field) (
|
|
serial_number *SerialNumber, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_SerialNumber_By_SerialNumber(ctx, serial_number_serial_number)
|
|
}
|
|
|
|
func (rx *Rx) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field) (
|
|
storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start, storagenode_bandwidth_rollup_action)
|
|
}
|
|
|
|
func (rx *Rx) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
|
|
bucket_storage_tally *BucketStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
accounting_rollup *AccountingRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_AccountingRollup_By_Id(ctx, accounting_rollup_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Head(ctx context.Context,
|
|
api_key_head ApiKey_Head_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Head(ctx, api_key_head)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Id(ctx, api_key_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Name_And_ProjectId(ctx, api_key_name, api_key_project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
|
}
|
|
|
|
func (rx *Rx) Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Coupon_By_Id(ctx, coupon_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_GracefulExitProgress_By_NodeId(ctx, graceful_exit_progress_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num)
|
|
}
|
|
|
|
func (rx *Rx) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Offer_By_Id(ctx context.Context,
|
|
offer_id Offer_Id_Field) (
|
|
offer *Offer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Offer_By_Id(ctx, offer_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
peer_identity *PeerIdentity, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_PeerIdentity_By_NodeId(ctx, peer_identity_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
row *LeafSerialNumber_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx, peer_identity_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
pending_audits *PendingAudits, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_PendingAudits_By_NodeId(ctx, pending_audits_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
|
|
project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field) (
|
|
project_invoice_stamp *ProjectInvoiceStamp, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx, project_invoice_stamp_project_id, project_invoice_stamp_start_date)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *UsageLimit_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_UsageLimit_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_RegistrationToken_By_OwnerId(ctx, registration_token_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_RegistrationToken_By_Secret(ctx, registration_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ResetPasswordToken_By_OwnerId(ctx, reset_password_token_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ResetPasswordToken_By_Secret(ctx, reset_password_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Get_StoragenodeStorageTally_By_Id(ctx context.Context,
|
|
storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
|
|
storagenode_storage_tally *StoragenodeStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StoragenodeStorageTally_By_Id(ctx, storagenode_storage_tally_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StripeCustomer_CustomerId_By_UserId(ctx, stripe_customer_user_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx, stripecoinpayments_invoice_project_record_project_id, stripecoinpayments_invoice_project_record_period_start, stripecoinpayments_invoice_project_record_period_end)
|
|
}
|
|
|
|
func (rx *Rx) Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx, stripecoinpayments_tx_conversion_rate_tx_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
|
|
user_normalized_email User_NormalizedEmail_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx, user_normalized_email)
|
|
}
|
|
|
|
func (rx *Rx) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
value_attribution *ValueAttribution, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ValueAttribution_By_ProjectId_And_BucketName(ctx, value_attribution_project_id, value_attribution_bucket_name)
|
|
}
|
|
|
|
func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx, coinpayments_transaction_created_at_less_or_equal, coinpayments_transaction_status, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_CouponUsage_By_Period_LessOrEqual_And_Status_Equal_Number_OrderBy_Desc_Period(ctx context.Context,
|
|
coupon_usage_period_less_or_equal CouponUsage_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*CouponUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_CouponUsage_By_Period_LessOrEqual_And_Status_Equal_Number_OrderBy_Desc_Period(ctx, coupon_usage_period_less_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx, coupon_created_at_less_or_equal, coupon_status, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx, irreparabledb_segmentpath_greater, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context.Context,
|
|
node_id_greater_or_equal Node_Id_Field,
|
|
limit int, offset int64) (
|
|
rows []*Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx, node_id_greater_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
|
|
node_id_greater_or_equal Node_Id_Field,
|
|
limit int, offset int64) (
|
|
rows []*Id_LastNet_Address_Protocol_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx, node_id_greater_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_ProjectMember_By_ProjectId(ctx context.Context,
|
|
project_member_project_id ProjectMember_ProjectId_Field,
|
|
limit int, offset int64) (
|
|
rows []*ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_ProjectMember_By_ProjectId(ctx, project_member_project_id, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripeCustomer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx, stripe_customer_created_at_less_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_StripecoinpaymentsInvoiceProjectRecord_By_CreatedAt_LessOrEqual_And_State_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_created_at_less_or_equal StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_StripecoinpaymentsInvoiceProjectRecord_By_CreatedAt_LessOrEqual_And_State_OrderBy_Desc_CreatedAt(ctx, stripecoinpayments_invoice_project_record_created_at_less_or_equal, stripecoinpayments_invoice_project_record_state, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_AccountingTimestamps_By_Name(ctx, accounting_timestamps_name, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_ApiKey_By_Id(ctx, api_key_id, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
|
|
update GracefulExitProgress_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_GracefulExitProgress_By_NodeId(ctx, graceful_exit_progress_node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
update GracefulExitTransferQueue_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_Node_By_Id(ctx, node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_Offer_By_Id(ctx context.Context,
|
|
offer_id Offer_Id_Field,
|
|
update Offer_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_Offer_By_Id(ctx, offer_id, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
update PeerIdentity_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_PeerIdentity_By_NodeId(ctx, peer_identity_node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
update BucketMetainfo_Update_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
update CoinpaymentsTransaction_Update_Fields) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_CoinpaymentsTransaction_By_Id(ctx, coinpayments_transaction_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
update CouponUsage_Update_Fields) (
|
|
coupon_usage *CouponUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_CouponUsage_By_CouponId_And_Period(ctx, coupon_usage_coupon_id, coupon_usage_period, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
update Coupon_Update_Fields) (
|
|
coupon *Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Coupon_By_Id(ctx, coupon_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Node_By_Id(ctx, node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field,
|
|
update PendingAudits_Update_Fields) (
|
|
pending_audits *PendingAudits, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_PendingAudits_By_NodeId(ctx, pending_audits_node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Project_By_Id(ctx, project_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_RegistrationToken_By_Secret(ctx, registration_token_secret, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
|
|
update StripecoinpaymentsApplyBalanceIntent_Update_Fields) (
|
|
stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx, stripecoinpayments_apply_balance_intent_tx_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx, stripecoinpayments_invoice_project_record_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_User_By_Id(ctx, user_id, update)
|
|
}
|
|
|
|
type Methods interface {
|
|
All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
|
|
rows []*AccountingRollup, err error)
|
|
|
|
All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
rows []*ApiKey, err error)
|
|
|
|
All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error)
|
|
|
|
All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
|
rows []*BucketStorageTally, err error)
|
|
|
|
All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error)
|
|
|
|
All_Coupon_By_ProjectId_And_Status_Equal_Number_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_project_id Coupon_ProjectId_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error)
|
|
|
|
All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
|
|
node_last_contact_success_less Node_LastContactSuccess_Field) (
|
|
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error)
|
|
|
|
All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
|
|
rows []*Id_PieceCount_Row, err error)
|
|
|
|
All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
|
|
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
|
|
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
|
|
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
|
|
rows []*NodesOfflineTime, err error)
|
|
|
|
All_Offer_OrderBy_Asc_Id(ctx context.Context) (
|
|
rows []*Offer, err error)
|
|
|
|
All_Project(ctx context.Context) (
|
|
rows []*Project, err error)
|
|
|
|
All_ProjectInvoiceStamp_By_ProjectId_OrderBy_Desc_StartDate(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field) (
|
|
rows []*ProjectInvoiceStamp, err error)
|
|
|
|
All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error)
|
|
|
|
All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field) (
|
|
rows []*Project, err error)
|
|
|
|
All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_owner_id Project_OwnerId_Field) (
|
|
rows []*Project, err error)
|
|
|
|
All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error)
|
|
|
|
All_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field) (
|
|
rows []*StoragenodeBandwidthRollup, err error)
|
|
|
|
All_StoragenodeStorageTally(ctx context.Context) (
|
|
rows []*StoragenodeStorageTally, err error)
|
|
|
|
All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
|
|
rows []*StoragenodeStorageTally, err error)
|
|
|
|
All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx context.Context,
|
|
user_credit_user_id UserCredit_UserId_Field,
|
|
user_credit_expires_at_greater UserCredit_ExpiresAt_Field) (
|
|
rows []*UserCredit, err error)
|
|
|
|
Count_UserCredit_By_ReferredBy(ctx context.Context,
|
|
user_credit_referred_by UserCredit_ReferredBy_Field) (
|
|
count int64, err error)
|
|
|
|
CreateNoReturn_AccountingRollup(ctx context.Context,
|
|
accounting_rollup_node_id AccountingRollup_NodeId_Field,
|
|
accounting_rollup_start_time AccountingRollup_StartTime_Field,
|
|
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
|
|
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
|
|
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
|
|
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
|
|
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
|
|
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_GracefulExitProgress(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
|
|
graceful_exit_progress_bytes_transferred GracefulExitProgress_BytesTransferred_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_GracefulExitTransferQueue(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
graceful_exit_transfer_queue_durability_ratio GracefulExitTransferQueue_DurabilityRatio_Field,
|
|
graceful_exit_transfer_queue_order_limit_send_count GracefulExitTransferQueue_OrderLimitSendCount_Field,
|
|
optional GracefulExitTransferQueue_Create_Fields) (
|
|
err error)
|
|
|
|
CreateNoReturn_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_address Node_Address_Field,
|
|
node_last_net Node_LastNet_Field,
|
|
node_protocol Node_Protocol_Field,
|
|
node_type Node_Type_Field,
|
|
node_email Node_Email_Field,
|
|
node_wallet Node_Wallet_Field,
|
|
node_free_bandwidth Node_FreeBandwidth_Field,
|
|
node_free_disk Node_FreeDisk_Field,
|
|
node_major Node_Major_Field,
|
|
node_minor Node_Minor_Field,
|
|
node_patch Node_Patch_Field,
|
|
node_hash Node_Hash_Field,
|
|
node_timestamp Node_Timestamp_Field,
|
|
node_release Node_Release_Field,
|
|
node_latency_90 Node_Latency90_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_last_contact_success Node_LastContactSuccess_Field,
|
|
node_last_contact_failure Node_LastContactFailure_Field,
|
|
node_contained Node_Contained_Field,
|
|
node_audit_reputation_alpha Node_AuditReputationAlpha_Field,
|
|
node_audit_reputation_beta Node_AuditReputationBeta_Field,
|
|
node_uptime_reputation_alpha Node_UptimeReputationAlpha_Field,
|
|
node_uptime_reputation_beta Node_UptimeReputationBeta_Field,
|
|
node_exit_success Node_ExitSuccess_Field,
|
|
optional Node_Create_Fields) (
|
|
err error)
|
|
|
|
CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field,
|
|
serial_number_bucket_id SerialNumber_BucketId_Field,
|
|
serial_number_expires_at SerialNumber_ExpiresAt_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_StoragenodeStorageTally(ctx context.Context,
|
|
storagenode_storage_tally_node_id StoragenodeStorageTally_NodeId_Field,
|
|
storagenode_storage_tally_interval_end_time StoragenodeStorageTally_IntervalEndTime_Field,
|
|
storagenode_storage_tally_data_total StoragenodeStorageTally_DataTotal_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_UsedSerial(ctx context.Context,
|
|
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
|
|
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
|
|
err error)
|
|
|
|
Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_head ApiKey_Head_Field,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_secret ApiKey_Secret_Field,
|
|
optional ApiKey_Create_Fields) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error)
|
|
|
|
Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error)
|
|
|
|
Create_Coupon(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
coupon_project_id Coupon_ProjectId_Field,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_amount Coupon_Amount_Field,
|
|
coupon_description Coupon_Description_Field,
|
|
coupon_type Coupon_Type_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
coupon_duration Coupon_Duration_Field) (
|
|
coupon *Coupon, err error)
|
|
|
|
Create_CouponUsage(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_amount CouponUsage_Amount_Field,
|
|
coupon_usage_status CouponUsage_Status_Field,
|
|
coupon_usage_period CouponUsage_Period_Field) (
|
|
coupon_usage *CouponUsage, err error)
|
|
|
|
Create_NodesOfflineTime(ctx context.Context,
|
|
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
|
|
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
|
|
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
|
|
nodes_offline_time *NodesOfflineTime, err error)
|
|
|
|
Create_Offer(ctx context.Context,
|
|
offer_name Offer_Name_Field,
|
|
offer_description Offer_Description_Field,
|
|
offer_award_credit_in_cents Offer_AwardCreditInCents_Field,
|
|
offer_invitee_credit_in_cents Offer_InviteeCreditInCents_Field,
|
|
offer_expires_at Offer_ExpiresAt_Field,
|
|
offer_status Offer_Status_Field,
|
|
offer_type Offer_Type_Field,
|
|
optional Offer_Create_Fields) (
|
|
offer *Offer, err error)
|
|
|
|
Create_PendingAudits(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field,
|
|
pending_audits_piece_id PendingAudits_PieceId_Field,
|
|
pending_audits_stripe_index PendingAudits_StripeIndex_Field,
|
|
pending_audits_share_size PendingAudits_ShareSize_Field,
|
|
pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field,
|
|
pending_audits_reverify_count PendingAudits_ReverifyCount_Field,
|
|
pending_audits_path PendingAudits_Path_Field) (
|
|
pending_audits *PendingAudits, err error)
|
|
|
|
Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field,
|
|
project_usage_limit Project_UsageLimit_Field,
|
|
project_owner_id Project_OwnerId_Field,
|
|
optional Project_Create_Fields) (
|
|
project *Project, err error)
|
|
|
|
Create_ProjectInvoiceStamp(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
|
|
project_invoice_stamp_invoice_id ProjectInvoiceStamp_InvoiceId_Field,
|
|
project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field,
|
|
project_invoice_stamp_end_date ProjectInvoiceStamp_EndDate_Field,
|
|
project_invoice_stamp_created_at ProjectInvoiceStamp_CreatedAt_Field) (
|
|
project_invoice_stamp *ProjectInvoiceStamp, err error)
|
|
|
|
Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error)
|
|
|
|
Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error)
|
|
|
|
Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error)
|
|
|
|
Create_StripecoinpaymentsApplyBalanceIntent(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
|
|
stripecoinpayments_apply_balance_intent_state StripecoinpaymentsApplyBalanceIntent_State_Field) (
|
|
stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error)
|
|
|
|
Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error)
|
|
|
|
Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
|
|
stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error)
|
|
|
|
Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_email User_Email_Field,
|
|
user_normalized_email User_NormalizedEmail_Field,
|
|
user_full_name User_FullName_Field,
|
|
user_password_hash User_PasswordHash_Field,
|
|
optional User_Create_Fields) (
|
|
user *User, err error)
|
|
|
|
Create_UserCredit(ctx context.Context,
|
|
user_credit_user_id UserCredit_UserId_Field,
|
|
user_credit_offer_id UserCredit_OfferId_Field,
|
|
user_credit_type UserCredit_Type_Field,
|
|
user_credit_credits_earned_in_cents UserCredit_CreditsEarnedInCents_Field,
|
|
user_credit_expires_at UserCredit_ExpiresAt_Field,
|
|
optional UserCredit_Create_Fields) (
|
|
user_credit *UserCredit, err error)
|
|
|
|
Create_ValueAttribution(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field,
|
|
value_attribution_partner_id ValueAttribution_PartnerId_Field) (
|
|
value_attribution *ValueAttribution, err error)
|
|
|
|
Delete_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
|
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_StoragenodeStorageTally_By_Id(ctx context.Context,
|
|
storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
deleted bool, err error)
|
|
|
|
Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error)
|
|
|
|
Find_BucketBandwidthRollup_By_BucketName_And_ProjectId_And_IntervalStart_And_Action(ctx context.Context,
|
|
bucket_bandwidth_rollup_bucket_name BucketBandwidthRollup_BucketName_Field,
|
|
bucket_bandwidth_rollup_project_id BucketBandwidthRollup_ProjectId_Field,
|
|
bucket_bandwidth_rollup_interval_start BucketBandwidthRollup_IntervalStart_Field,
|
|
bucket_bandwidth_rollup_action BucketBandwidthRollup_Action_Field) (
|
|
bucket_bandwidth_rollup *BucketBandwidthRollup, err error)
|
|
|
|
Find_SerialNumber_By_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field) (
|
|
serial_number *SerialNumber, err error)
|
|
|
|
Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field) (
|
|
storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error)
|
|
|
|
First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
|
|
bucket_storage_tally *BucketStorageTally, err error)
|
|
|
|
Get_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
accounting_rollup *AccountingRollup, err error)
|
|
|
|
Get_ApiKey_By_Head(ctx context.Context,
|
|
api_key_head ApiKey_Head_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
bucket_metainfo *BucketMetainfo, err error)
|
|
|
|
Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error)
|
|
|
|
Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error)
|
|
|
|
Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error)
|
|
|
|
Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error)
|
|
|
|
Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error)
|
|
|
|
Get_Offer_By_Id(ctx context.Context,
|
|
offer_id Offer_Id_Field) (
|
|
offer *Offer, err error)
|
|
|
|
Get_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
peer_identity *PeerIdentity, err error)
|
|
|
|
Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
row *LeafSerialNumber_Row, err error)
|
|
|
|
Get_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
pending_audits *PendingAudits, err error)
|
|
|
|
Get_ProjectInvoiceStamp_By_ProjectId_And_StartDate(ctx context.Context,
|
|
project_invoice_stamp_project_id ProjectInvoiceStamp_ProjectId_Field,
|
|
project_invoice_stamp_start_date ProjectInvoiceStamp_StartDate_Field) (
|
|
project_invoice_stamp *ProjectInvoiceStamp, err error)
|
|
|
|
Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error)
|
|
|
|
Get_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *UsageLimit_Row, err error)
|
|
|
|
Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error)
|
|
|
|
Get_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
reset_password_token *ResetPasswordToken, err error)
|
|
|
|
Get_StoragenodeStorageTally_By_Id(ctx context.Context,
|
|
storagenode_storage_tally_id StoragenodeStorageTally_Id_Field) (
|
|
storagenode_storage_tally *StoragenodeStorageTally, err error)
|
|
|
|
Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error)
|
|
|
|
Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error)
|
|
|
|
Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error)
|
|
|
|
Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error)
|
|
|
|
Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
|
|
user_normalized_email User_NormalizedEmail_Field) (
|
|
user *User, err error)
|
|
|
|
Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
value_attribution *ValueAttribution, err error)
|
|
|
|
Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error)
|
|
|
|
Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error)
|
|
|
|
Limited_CoinpaymentsTransaction_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_created_at_less_or_equal CoinpaymentsTransaction_CreatedAt_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*CoinpaymentsTransaction, err error)
|
|
|
|
Limited_CouponUsage_By_Period_LessOrEqual_And_Status_Equal_Number_OrderBy_Desc_Period(ctx context.Context,
|
|
coupon_usage_period_less_or_equal CouponUsage_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*CouponUsage, err error)
|
|
|
|
Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*Coupon, err error)
|
|
|
|
Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error)
|
|
|
|
Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context.Context,
|
|
node_id_greater_or_equal Node_Id_Field,
|
|
limit int, offset int64) (
|
|
rows []*Node, err error)
|
|
|
|
Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error)
|
|
|
|
Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
|
|
node_id_greater_or_equal Node_Id_Field,
|
|
limit int, offset int64) (
|
|
rows []*Id_LastNet_Address_Protocol_Row, err error)
|
|
|
|
Limited_ProjectMember_By_ProjectId(ctx context.Context,
|
|
project_member_project_id ProjectMember_ProjectId_Field,
|
|
limit int, offset int64) (
|
|
rows []*ProjectMember, err error)
|
|
|
|
Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*Project, err error)
|
|
|
|
Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripeCustomer, err error)
|
|
|
|
Limited_StripecoinpaymentsInvoiceProjectRecord_By_CreatedAt_LessOrEqual_And_State_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_created_at_less_or_equal StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripecoinpaymentsInvoiceProjectRecord, err error)
|
|
|
|
UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field,
|
|
update GracefulExitProgress_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
update GracefulExitTransferQueue_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_Offer_By_Id(ctx context.Context,
|
|
offer_id Offer_Id_Field,
|
|
update Offer_Update_Fields) (
|
|
err error)
|
|
|
|
UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
update PeerIdentity_Update_Fields) (
|
|
err error)
|
|
|
|
Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
update BucketMetainfo_Update_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error)
|
|
|
|
Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
update CoinpaymentsTransaction_Update_Fields) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error)
|
|
|
|
Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
update CouponUsage_Update_Fields) (
|
|
coupon_usage *CouponUsage, err error)
|
|
|
|
Update_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
update Coupon_Update_Fields) (
|
|
coupon *Coupon, err error)
|
|
|
|
Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error)
|
|
|
|
Update_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field,
|
|
update PendingAudits_Update_Fields) (
|
|
pending_audits *PendingAudits, err error)
|
|
|
|
Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error)
|
|
|
|
Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Update_StripecoinpaymentsApplyBalanceIntent_By_TxId(ctx context.Context,
|
|
stripecoinpayments_apply_balance_intent_tx_id StripecoinpaymentsApplyBalanceIntent_TxId_Field,
|
|
update StripecoinpaymentsApplyBalanceIntent_Update_Fields) (
|
|
stripecoinpayments_apply_balance_intent *StripecoinpaymentsApplyBalanceIntent, err error)
|
|
|
|
Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error)
|
|
|
|
Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error)
|
|
}
|
|
|
|
// TxMethods is the exported set of operations available while a
// transaction is open: every generated query (via Methods) plus
// transaction lifecycle control.
type TxMethods interface {
	Methods

	// Rebind rewrites placeholder markers in s to the syntax expected by
	// the underlying driver (implementation generated elsewhere in this
	// file — confirm exact placeholder style there).
	Rebind(s string) string

	// Commit commits the transaction.
	Commit() error

	// Rollback aborts the transaction.
	Rollback() error
}
|
|
|
|
// txMethods is the package-internal transaction interface. It extends the
// exported TxMethods with helpers that only generated code needs.
type txMethods interface {
	TxMethods

	// deleteAll presumably deletes every row from every table and returns
	// the total count removed — verify against the generated
	// implementation elsewhere in this file.
	deleteAll(ctx context.Context) (int64, error)

	// makeErr wraps a driver error into this package's *Error type
	// (see makeErr at the top of the file).
	makeErr(err error) error
}
|
|
|
|
// DBMethods is the exported set of operations available on an open
// database handle: every generated query (via Methods) plus schema and
// driver helpers.
type DBMethods interface {
	Methods

	// Schema presumably returns the DDL for the full database schema —
	// verify against the generated implementation elsewhere in this file.
	Schema() string

	// Rebind rewrites placeholder markers in sql to the syntax expected
	// by the underlying driver.
	Rebind(sql string) string
}
|
|
|
|
// dbMethods is the package-internal database interface. It extends the
// exported DBMethods with helpers that only generated code needs.
type dbMethods interface {
	DBMethods

	// wrapTx adapts a raw *sql.Tx into this package's transactional
	// method set.
	wrapTx(tx *sql.Tx) txMethods

	// makeErr wraps a driver error into this package's *Error type
	// (see makeErr at the top of the file).
	makeErr(err error) error
}
|
|
|
|
// openpostgres opens a database handle for the given connection string
// using the driver registered under the name "postgres" (lib/pq).
// Note that sql.Open does not establish a connection; it only validates
// the driver name and arguments.
func openpostgres(source string) (*sql.DB, error) {
	db, err := sql.Open("postgres", source)
	if err != nil {
		return nil, err
	}
	return db, nil
}
|
|
|
|
// opencockroach opens a database handle for CockroachDB. It first tries
// a driver registered under the name "cockroach", in case the caller has
// installed special handling for it; when that driver is not registered
// it falls back to the stock "postgres" driver, which speaks the same
// wire protocol.
func opencockroach(source string) (*sql.DB, error) {
	if db, err := sql.Open("cockroach", source); err == nil {
		return db, nil
	}
	// "cockroach" is not registered; retry with "postgres".
	return sql.Open("postgres", source)
}
|