2462ac5917
Change-Id: I8318f5581eef559f0dfbf20d07ea1fe36f81ce87
22426 lines
765 KiB
Go
22426 lines
765 KiB
Go
//lint:file-ignore U1000,ST1012 generated file
|
|
// AUTOGENERATED BY storj.io/dbx
|
|
// DO NOT EDIT.
|
|
|
|
package dbx
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"database/sql"
|
|
"errors"
|
|
"fmt"
|
|
"reflect"
|
|
"strconv"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
"unicode"
|
|
|
|
"github.com/jackc/pgconn"
|
|
_ "github.com/jackc/pgx/v4/stdlib"
|
|
|
|
"storj.io/storj/private/tagsql"
|
|
)
|
|
|
|
// Prevent conditional imports from causing build failures.
// The generator only sometimes emits code using these packages; the
// blank references below keep the import block valid either way.
var _ = strconv.Itoa
var _ = strings.LastIndex
var _ = fmt.Sprint
var _ sync.Mutex
|
|
|
|
var (
	// WrapErr is invoked on every *Error produced by this package before
	// it is returned to the caller; replace it to customize wrapping.
	// The default returns the error unchanged.
	WrapErr = func(err *Error) error { return err }
	// Logger, when non-nil, receives printf-style diagnostic output
	// (statement logging, rollback failures).
	Logger func(format string, args ...interface{})
	// ShouldRetry, when non-nil, reports whether err from the named
	// driver is transient and the operation may be re-issued.
	ShouldRetry func(driver string, err error) bool

	// Sentinel errors carried inside *Error for their matching codes.
	errTooManyRows       = errors.New("too many rows")
	errUnsupportedDriver = errors.New("unsupported driver")
	errEmptyUpdate       = errors.New("empty update")
)
|
|
|
|
func logError(format string, args ...interface{}) {
|
|
if Logger != nil {
|
|
Logger(format, args...)
|
|
}
|
|
}
|
|
|
|
// ErrorCode classifies the errors produced by this package.
type ErrorCode int

const (
	// ErrorCode_Unknown is the zero value, used when no more specific
	// classification applies.
	ErrorCode_Unknown ErrorCode = iota
	// ErrorCode_UnsupportedDriver means a driver name other than the
	// generated dialects was requested.
	ErrorCode_UnsupportedDriver
	// ErrorCode_NoRows corresponds to sql.ErrNoRows.
	ErrorCode_NoRows
	// ErrorCode_TxDone corresponds to sql.ErrTxDone.
	ErrorCode_TxDone
	// ErrorCode_TooManyRows means a query expecting at most one row
	// matched more.
	ErrorCode_TooManyRows
	// ErrorCode_ConstraintViolation means the database reported a
	// constraint failure.
	ErrorCode_ConstraintViolation
	// ErrorCode_EmptyUpdate means an update was requested with no
	// fields to change.
	ErrorCode_EmptyUpdate
)
|
|
|
|
// Error is the concrete error type produced by this package. Fields
// beyond Err are populated only when relevant to Code.
type Error struct {
	// Err is the underlying error.
	Err error
	// Code classifies the error.
	Code ErrorCode
	// Driver is the requested driver name (unsupported-driver errors).
	Driver string
	// Constraint is the violated constraint name (constraint errors).
	Constraint string
	// QuerySuffix identifies the query (too-many-rows errors).
	QuerySuffix string
}

// Error returns the message of the underlying error.
func (e *Error) Error() string {
	return e.Err.Error()
}
|
|
|
|
func wrapErr(e *Error) error {
|
|
if WrapErr == nil {
|
|
return e
|
|
}
|
|
return WrapErr(e)
|
|
}
|
|
|
|
func makeErr(err error) error {
|
|
if err == nil {
|
|
return nil
|
|
}
|
|
e := &Error{Err: err}
|
|
switch err {
|
|
case sql.ErrNoRows:
|
|
e.Code = ErrorCode_NoRows
|
|
case sql.ErrTxDone:
|
|
e.Code = ErrorCode_TxDone
|
|
}
|
|
return wrapErr(e)
|
|
}
|
|
|
|
func shouldRetry(driver string, err error) bool {
|
|
if ShouldRetry == nil {
|
|
return false
|
|
}
|
|
return ShouldRetry(driver, err)
|
|
}
|
|
|
|
func unsupportedDriver(driver string) error {
|
|
return wrapErr(&Error{
|
|
Err: errUnsupportedDriver,
|
|
Code: ErrorCode_UnsupportedDriver,
|
|
Driver: driver,
|
|
})
|
|
}
|
|
|
|
func emptyUpdate() error {
|
|
return wrapErr(&Error{
|
|
Err: errEmptyUpdate,
|
|
Code: ErrorCode_EmptyUpdate,
|
|
})
|
|
}
|
|
|
|
func tooManyRows(query_suffix string) error {
|
|
return wrapErr(&Error{
|
|
Err: errTooManyRows,
|
|
Code: ErrorCode_TooManyRows,
|
|
QuerySuffix: query_suffix,
|
|
})
|
|
}
|
|
|
|
func constraintViolation(err error, constraint string) error {
|
|
return wrapErr(&Error{
|
|
Err: err,
|
|
Code: ErrorCode_ConstraintViolation,
|
|
Constraint: constraint,
|
|
})
|
|
}
|
|
|
|
// driver is the subset of operations shared by tagsql.DB and
// tagsql.Tx, letting the dialect implementations run queries against
// either a bare connection or an open transaction.
type driver interface {
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	QueryContext(ctx context.Context, query string, args ...interface{}) (tagsql.Rows, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
}

var (
	// notAPointer: a scan destination was not a pointer
	// (referenced by generated code outside this chunk).
	notAPointer = errors.New("destination not a pointer")
	// lossyConversion: assigning a value to a destination would lose
	// information (referenced by generated code outside this chunk).
	lossyConversion = errors.New("lossy conversion")
)
|
|
|
|
// DB wraps a tagsql.DB together with the dialect-specific methods
// (dbMethods) selected when Open chose the driver.
type DB struct {
	tagsql.DB
	dbMethods

	// Hooks let callers (typically tests) override
	// environment-dependent behavior.
	Hooks struct {
		// Now returns the current time; Open sets it to time.Now.
		Now func() time.Time
	}

	// driver is the driver name that was passed to Open.
	driver string
}
|
|
|
|
func Open(driver, source string) (db *DB, err error) {
|
|
var sql_db *sql.DB
|
|
switch driver {
|
|
case "pgx":
|
|
sql_db, err = openpgx(source)
|
|
case "pgxcockroach":
|
|
sql_db, err = openpgxcockroach(source)
|
|
default:
|
|
return nil, unsupportedDriver(driver)
|
|
}
|
|
if err != nil {
|
|
return nil, makeErr(err)
|
|
}
|
|
defer func(sql_db *sql.DB) {
|
|
if err != nil {
|
|
sql_db.Close()
|
|
}
|
|
}(sql_db)
|
|
|
|
if err := sql_db.Ping(); err != nil {
|
|
return nil, makeErr(err)
|
|
}
|
|
|
|
db = &DB{
|
|
DB: tagsql.Wrap(sql_db),
|
|
|
|
driver: driver,
|
|
}
|
|
db.Hooks.Now = time.Now
|
|
|
|
switch driver {
|
|
case "pgx":
|
|
db.dbMethods = newpgx(db)
|
|
case "pgxcockroach":
|
|
db.dbMethods = newpgxcockroach(db)
|
|
default:
|
|
return nil, unsupportedDriver(driver)
|
|
}
|
|
|
|
return db, nil
|
|
}
|
|
|
|
func (obj *DB) Close() (err error) {
|
|
return obj.makeErr(obj.DB.Close())
|
|
}
|
|
|
|
func (obj *DB) Open(ctx context.Context) (*Tx, error) {
|
|
tx, err := obj.DB.BeginTx(ctx, nil)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
return &Tx{
|
|
Tx: tx,
|
|
txMethods: obj.wrapTx(tx),
|
|
}, nil
|
|
}
|
|
|
|
func (obj *DB) NewRx() *Rx {
|
|
return &Rx{db: obj}
|
|
}
|
|
|
|
func DeleteAll(ctx context.Context, db *DB) (int64, error) {
|
|
tx, err := db.Open(ctx)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
defer func() {
|
|
if err == nil {
|
|
err = db.makeErr(tx.Commit())
|
|
return
|
|
}
|
|
|
|
if err_rollback := tx.Rollback(); err_rollback != nil {
|
|
logError("delete-all: rollback failed: %v", db.makeErr(err_rollback))
|
|
}
|
|
}()
|
|
return tx.deleteAll(ctx)
|
|
}
|
|
|
|
// Tx wraps a tagsql.Tx together with the dialect-specific transaction
// methods selected when the DB was opened.
type Tx struct {
	Tx tagsql.Tx
	txMethods
}

// dialectTx supplies Commit/Rollback over a tagsql.Tx, wrapping errors
// in this package's Error type.
type dialectTx struct {
	tx tagsql.Tx
}

// Commit commits the transaction, wrapping any error.
func (tx *dialectTx) Commit() (err error) {
	return makeErr(tx.tx.Commit())
}

// Rollback aborts the transaction, wrapping any error.
func (tx *dialectTx) Rollback() (err error) {
	return makeErr(tx.tx.Rollback())
}
|
|
|
|
// pgxImpl carries the shared query machinery for the pgx (PostgreSQL)
// dialect. The same type backs both the DB (txn == false) and an open
// transaction (txn == true).
type pgxImpl struct {
	db      *DB
	dialect __sqlbundle_pgx
	driver  driver
	txn     bool
}

// Rebind rewrites a query's placeholders for the pgx dialect.
func (obj *pgxImpl) Rebind(s string) string {
	return obj.dialect.Rebind(s)
}

// logStmt logs a statement and its arguments via the package Logger.
func (obj *pgxImpl) logStmt(stmt string, args ...interface{}) {
	pgxLogStmt(stmt, args...)
}

// makeErr wraps err, upgrading recognized constraint failures to
// constraint-violation errors.
func (obj *pgxImpl) makeErr(err error) error {
	constraint, ok := obj.isConstraintError(err)
	if ok {
		return constraintViolation(err, constraint)
	}
	return makeErr(err)
}

// shouldRetry reports whether err is retryable; retries are never
// attempted inside an open transaction.
func (obj *pgxImpl) shouldRetry(err error) bool {
	return !obj.txn && shouldRetry(obj.db.driver, err)
}
|
|
|
|
// pgxImpl_retryingRow captures a single-row query so the whole query
// can be re-executed on retryable errors when it is finally scanned.
type pgxImpl_retryingRow struct {
	obj   *pgxImpl
	ctx   context.Context
	query string
	args  []interface{}
}

// queryRowContext defers execution of a single-row query until Scan,
// which allows the query to be retried as a unit.
func (obj *pgxImpl) queryRowContext(ctx context.Context, query string, args ...interface{}) *pgxImpl_retryingRow {
	return &pgxImpl_retryingRow{
		obj:   obj,
		ctx:   ctx,
		query: query,
		args:  args,
	}
}

// Scan executes the captured query and scans the row into dest,
// re-issuing the query for as long as shouldRetry approves the error.
// Errors are returned unwrapped; the caller wraps them.
func (rows *pgxImpl_retryingRow) Scan(dest ...interface{}) error {
	for {
		err := rows.obj.driver.QueryRowContext(rows.ctx, rows.query, rows.args...).Scan(dest...)
		if err != nil {
			if rows.obj.shouldRetry(err) {
				continue
			}
			// caller will wrap this error
			return err
		}
		return nil
	}
}
|
|
|
|
// pgxDB binds the pgx dialect implementation to a DB handle.
type pgxDB struct {
	db *DB
	*pgxImpl
}

// newpgx constructs the pgx dialect methods for db, running queries
// directly against the (non-transactional) database handle.
func newpgx(db *DB) *pgxDB {
	return &pgxDB{
		db: db,
		pgxImpl: &pgxImpl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
// Schema returns the DDL that creates every table and index for the
// pgx (PostgreSQL) dialect. The SQL is emitted verbatim by the dbx
// code generator; do not edit it by hand.
func (obj *pgxDB) Schema() string {
	return `CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
node_id bytea NOT NULL,
history bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_bandwidth_rollup_archives (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupons (
id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
billing_periods bigint,
coupon_code_name text,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_codes (
id bytea NOT NULL,
name text NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
billing_periods bigint,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL DEFAULT 0,
pieces_failed bigint NOT NULL DEFAULT 0,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL DEFAULT 0,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
api_version integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL DEFAULT 0,
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint,
bandwidth_limit bigint,
rate_limit integer,
max_buckets integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
project_id bytea NOT NULL,
interval_month date NOT NULL,
egress_allocated bigint NOT NULL,
PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE revocations (
revoked bytea NOT NULL,
api_key_id bytea NOT NULL,
PRIMARY KEY ( revoked )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollup_archives (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text NOT NULL,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
distributed bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
position text,
company_name text,
company_size integer,
working_on text,
is_professional boolean NOT NULL DEFAULT false,
employee_count text,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_interval_start_index ON bucket_storage_tallies ( project_id, interval_start );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_off_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, offline_suspended, exit_finished_at, last_contact_success );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );`
}
|
|
|
|
// wrapTx binds the pgx dialect methods to an open transaction; the
// txn flag disables query retries inside the transaction.
func (obj *pgxDB) wrapTx(tx tagsql.Tx) txMethods {
	return &pgxTx{
		dialectTx: dialectTx{tx: tx},
		pgxImpl: &pgxImpl{
			db:     obj.db,
			driver: tx,
			txn:    true,
		},
	}
}

// pgxTx combines transaction control (dialectTx) with the pgx query
// machinery.
type pgxTx struct {
	dialectTx
	*pgxImpl
}

// pgxLogStmt sends a statement and its arguments to the package
// Logger, if one is installed.
func pgxLogStmt(stmt string, args ...interface{}) {
	// TODO: render placeholders
	if Logger != nil {
		out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
		Logger(out)
	}
}
|
|
|
|
// pgxcockroachImpl carries the shared query machinery for the
// pgxcockroach (CockroachDB) dialect. The same type backs both the DB
// (txn == false) and an open transaction (txn == true).
type pgxcockroachImpl struct {
	db      *DB
	dialect __sqlbundle_pgxcockroach
	driver  driver
	txn     bool
}

// Rebind rewrites a query's placeholders for the pgxcockroach dialect.
func (obj *pgxcockroachImpl) Rebind(s string) string {
	return obj.dialect.Rebind(s)
}

// logStmt logs a statement and its arguments via the package Logger.
func (obj *pgxcockroachImpl) logStmt(stmt string, args ...interface{}) {
	pgxcockroachLogStmt(stmt, args...)
}

// makeErr wraps err, upgrading recognized constraint failures to
// constraint-violation errors.
func (obj *pgxcockroachImpl) makeErr(err error) error {
	constraint, ok := obj.isConstraintError(err)
	if ok {
		return constraintViolation(err, constraint)
	}
	return makeErr(err)
}

// shouldRetry reports whether err is retryable; retries are never
// attempted inside an open transaction.
func (obj *pgxcockroachImpl) shouldRetry(err error) bool {
	return !obj.txn && shouldRetry(obj.db.driver, err)
}

// pgxcockroachImpl_retryingRow captures a single-row query so the
// whole query can be re-executed on retryable errors when scanned.
type pgxcockroachImpl_retryingRow struct {
	obj   *pgxcockroachImpl
	ctx   context.Context
	query string
	args  []interface{}
}

// queryRowContext defers execution of a single-row query until Scan,
// which allows the query to be retried as a unit.
func (obj *pgxcockroachImpl) queryRowContext(ctx context.Context, query string, args ...interface{}) *pgxcockroachImpl_retryingRow {
	return &pgxcockroachImpl_retryingRow{
		obj:   obj,
		ctx:   ctx,
		query: query,
		args:  args,
	}
}

// Scan executes the captured query and scans the row into dest,
// re-issuing the query for as long as shouldRetry approves the error.
// Errors are returned unwrapped; the caller wraps them.
func (rows *pgxcockroachImpl_retryingRow) Scan(dest ...interface{}) error {
	for {
		err := rows.obj.driver.QueryRowContext(rows.ctx, rows.query, rows.args...).Scan(dest...)
		if err != nil {
			if rows.obj.shouldRetry(err) {
				continue
			}
			// caller will wrap this error
			return err
		}
		return nil
	}
}
|
|
|
|
// pgxcockroachDB binds the pgxcockroach dialect implementation to a
// DB handle.
type pgxcockroachDB struct {
	db *DB
	*pgxcockroachImpl
}

// newpgxcockroach constructs the pgxcockroach dialect methods for db,
// running queries directly against the (non-transactional) database
// handle.
func newpgxcockroach(db *DB) *pgxcockroachDB {
	return &pgxcockroachDB{
		db: db,
		pgxcockroachImpl: &pgxcockroachImpl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
func (obj *pgxcockroachDB) Schema() string {
|
|
return `CREATE TABLE accounting_rollups (
|
|
node_id bytea NOT NULL,
|
|
start_time timestamp with time zone NOT NULL,
|
|
put_total bigint NOT NULL,
|
|
get_total bigint NOT NULL,
|
|
get_audit_total bigint NOT NULL,
|
|
get_repair_total bigint NOT NULL,
|
|
put_repair_total bigint NOT NULL,
|
|
at_rest_total double precision NOT NULL,
|
|
PRIMARY KEY ( node_id, start_time )
|
|
);
|
|
CREATE TABLE accounting_timestamps (
|
|
name text NOT NULL,
|
|
value timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( name )
|
|
);
|
|
CREATE TABLE audit_histories (
|
|
node_id bytea NOT NULL,
|
|
history bytea NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE bucket_bandwidth_rollups (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp with time zone NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
inline bigint NOT NULL,
|
|
allocated bigint NOT NULL,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
|
);
|
|
CREATE TABLE bucket_bandwidth_rollup_archives (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp with time zone NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
inline bigint NOT NULL,
|
|
allocated bigint NOT NULL,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
|
);
|
|
CREATE TABLE bucket_storage_tallies (
|
|
bucket_name bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
interval_start timestamp with time zone NOT NULL,
|
|
inline bigint NOT NULL,
|
|
remote bigint NOT NULL,
|
|
remote_segments_count integer NOT NULL,
|
|
inline_segments_count integer NOT NULL,
|
|
object_count integer NOT NULL,
|
|
metadata_size bigint NOT NULL,
|
|
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
|
);
|
|
CREATE TABLE coinpayments_transactions (
|
|
id text NOT NULL,
|
|
user_id bytea NOT NULL,
|
|
address text NOT NULL,
|
|
amount bytea NOT NULL,
|
|
received bytea NOT NULL,
|
|
status integer NOT NULL,
|
|
key text NOT NULL,
|
|
timeout integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE coupons (
|
|
id bytea NOT NULL,
|
|
user_id bytea NOT NULL,
|
|
amount bigint NOT NULL,
|
|
description text NOT NULL,
|
|
type integer NOT NULL,
|
|
status integer NOT NULL,
|
|
duration bigint NOT NULL,
|
|
billing_periods bigint,
|
|
coupon_code_name text,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE coupon_codes (
|
|
id bytea NOT NULL,
|
|
name text NOT NULL,
|
|
amount bigint NOT NULL,
|
|
description text NOT NULL,
|
|
type integer NOT NULL,
|
|
billing_periods bigint,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( name )
|
|
);
|
|
CREATE TABLE coupon_usages (
|
|
coupon_id bytea NOT NULL,
|
|
amount bigint NOT NULL,
|
|
status integer NOT NULL,
|
|
period timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( coupon_id, period )
|
|
);
|
|
CREATE TABLE graceful_exit_progress (
|
|
node_id bytea NOT NULL,
|
|
bytes_transferred bigint NOT NULL,
|
|
pieces_transferred bigint NOT NULL DEFAULT 0,
|
|
pieces_failed bigint NOT NULL DEFAULT 0,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE graceful_exit_transfer_queue (
|
|
node_id bytea NOT NULL,
|
|
path bytea NOT NULL,
|
|
piece_num integer NOT NULL,
|
|
root_piece_id bytea,
|
|
durability_ratio double precision NOT NULL,
|
|
queued_at timestamp with time zone NOT NULL,
|
|
requested_at timestamp with time zone,
|
|
last_failed_at timestamp with time zone,
|
|
last_failed_code integer,
|
|
failed_count integer,
|
|
finished_at timestamp with time zone,
|
|
order_limit_send_count integer NOT NULL DEFAULT 0,
|
|
PRIMARY KEY ( node_id, path, piece_num )
|
|
);
|
|
CREATE TABLE injuredsegments (
|
|
path bytea NOT NULL,
|
|
data bytea NOT NULL,
|
|
attempted timestamp with time zone,
|
|
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
|
segment_health double precision NOT NULL DEFAULT 1,
|
|
PRIMARY KEY ( path )
|
|
);
|
|
CREATE TABLE irreparabledbs (
|
|
segmentpath bytea NOT NULL,
|
|
segmentdetail bytea NOT NULL,
|
|
pieces_lost_count bigint NOT NULL,
|
|
seg_damaged_unix_sec bigint NOT NULL,
|
|
repair_attempt_count bigint NOT NULL,
|
|
PRIMARY KEY ( segmentpath )
|
|
);
|
|
CREATE TABLE nodes (
|
|
id bytea NOT NULL,
|
|
address text NOT NULL DEFAULT '',
|
|
last_net text NOT NULL,
|
|
last_ip_port text,
|
|
protocol integer NOT NULL DEFAULT 0,
|
|
type integer NOT NULL DEFAULT 0,
|
|
email text NOT NULL,
|
|
wallet text NOT NULL,
|
|
wallet_features text NOT NULL DEFAULT '',
|
|
free_disk bigint NOT NULL DEFAULT -1,
|
|
piece_count bigint NOT NULL DEFAULT 0,
|
|
major bigint NOT NULL DEFAULT 0,
|
|
minor bigint NOT NULL DEFAULT 0,
|
|
patch bigint NOT NULL DEFAULT 0,
|
|
hash text NOT NULL DEFAULT '',
|
|
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
|
|
release boolean NOT NULL DEFAULT false,
|
|
latency_90 bigint NOT NULL DEFAULT 0,
|
|
audit_success_count bigint NOT NULL DEFAULT 0,
|
|
total_audit_count bigint NOT NULL DEFAULT 0,
|
|
vetted_at timestamp with time zone,
|
|
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
|
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
|
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
|
|
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
|
|
contained boolean NOT NULL DEFAULT false,
|
|
disqualified timestamp with time zone,
|
|
suspended timestamp with time zone,
|
|
unknown_audit_suspended timestamp with time zone,
|
|
offline_suspended timestamp with time zone,
|
|
under_review timestamp with time zone,
|
|
online_score double precision NOT NULL DEFAULT 1,
|
|
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
|
audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
|
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
|
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
|
exit_initiated_at timestamp with time zone,
|
|
exit_loop_completed_at timestamp with time zone,
|
|
exit_finished_at timestamp with time zone,
|
|
exit_success boolean NOT NULL DEFAULT false,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE node_api_versions (
|
|
id bytea NOT NULL,
|
|
api_version integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE offers (
|
|
id serial NOT NULL,
|
|
name text NOT NULL,
|
|
description text NOT NULL,
|
|
award_credit_in_cents integer NOT NULL DEFAULT 0,
|
|
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
|
|
award_credit_duration_days integer,
|
|
invitee_credit_duration_days integer,
|
|
redeemable_cap integer,
|
|
expires_at timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
status integer NOT NULL,
|
|
type integer NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE peer_identities (
|
|
node_id bytea NOT NULL,
|
|
leaf_serial_number bytea NOT NULL,
|
|
chain bytea NOT NULL,
|
|
updated_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE pending_audits (
|
|
node_id bytea NOT NULL,
|
|
piece_id bytea NOT NULL,
|
|
stripe_index bigint NOT NULL,
|
|
share_size bigint NOT NULL,
|
|
expected_share_hash bytea NOT NULL,
|
|
reverify_count bigint NOT NULL,
|
|
path bytea NOT NULL,
|
|
PRIMARY KEY ( node_id )
|
|
);
|
|
CREATE TABLE projects (
|
|
id bytea NOT NULL,
|
|
name text NOT NULL,
|
|
description text NOT NULL,
|
|
usage_limit bigint,
|
|
bandwidth_limit bigint,
|
|
rate_limit integer,
|
|
max_buckets integer,
|
|
partner_id bytea,
|
|
owner_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE project_bandwidth_rollups (
|
|
project_id bytea NOT NULL,
|
|
interval_month date NOT NULL,
|
|
egress_allocated bigint NOT NULL,
|
|
PRIMARY KEY ( project_id, interval_month )
|
|
);
|
|
CREATE TABLE registration_tokens (
|
|
secret bytea NOT NULL,
|
|
owner_id bytea,
|
|
project_limit integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( secret ),
|
|
UNIQUE ( owner_id )
|
|
);
|
|
CREATE TABLE reset_password_tokens (
|
|
secret bytea NOT NULL,
|
|
owner_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( secret ),
|
|
UNIQUE ( owner_id )
|
|
);
|
|
CREATE TABLE revocations (
|
|
revoked bytea NOT NULL,
|
|
api_key_id bytea NOT NULL,
|
|
PRIMARY KEY ( revoked )
|
|
);
|
|
CREATE TABLE storagenode_bandwidth_rollups (
|
|
storagenode_id bytea NOT NULL,
|
|
interval_start timestamp with time zone NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
allocated bigint DEFAULT 0,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
|
);
|
|
CREATE TABLE storagenode_bandwidth_rollup_archives (
|
|
storagenode_id bytea NOT NULL,
|
|
interval_start timestamp with time zone NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
allocated bigint DEFAULT 0,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
|
);
|
|
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
|
|
storagenode_id bytea NOT NULL,
|
|
interval_start timestamp with time zone NOT NULL,
|
|
interval_seconds integer NOT NULL,
|
|
action integer NOT NULL,
|
|
allocated bigint DEFAULT 0,
|
|
settled bigint NOT NULL,
|
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
|
);
|
|
CREATE TABLE storagenode_payments (
|
|
id bigserial NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
node_id bytea NOT NULL,
|
|
period text NOT NULL,
|
|
amount bigint NOT NULL,
|
|
receipt text,
|
|
notes text,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE storagenode_paystubs (
|
|
period text NOT NULL,
|
|
node_id bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
codes text NOT NULL,
|
|
usage_at_rest double precision NOT NULL,
|
|
usage_get bigint NOT NULL,
|
|
usage_put bigint NOT NULL,
|
|
usage_get_repair bigint NOT NULL,
|
|
usage_put_repair bigint NOT NULL,
|
|
usage_get_audit bigint NOT NULL,
|
|
comp_at_rest bigint NOT NULL,
|
|
comp_get bigint NOT NULL,
|
|
comp_put bigint NOT NULL,
|
|
comp_get_repair bigint NOT NULL,
|
|
comp_put_repair bigint NOT NULL,
|
|
comp_get_audit bigint NOT NULL,
|
|
surge_percent bigint NOT NULL,
|
|
held bigint NOT NULL,
|
|
owed bigint NOT NULL,
|
|
disposed bigint NOT NULL,
|
|
paid bigint NOT NULL,
|
|
distributed bigint NOT NULL,
|
|
PRIMARY KEY ( period, node_id )
|
|
);
|
|
CREATE TABLE storagenode_storage_tallies (
|
|
node_id bytea NOT NULL,
|
|
interval_end_time timestamp with time zone NOT NULL,
|
|
data_total double precision NOT NULL,
|
|
PRIMARY KEY ( interval_end_time, node_id )
|
|
);
|
|
CREATE TABLE stripe_customers (
|
|
user_id bytea NOT NULL,
|
|
customer_id text NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( user_id ),
|
|
UNIQUE ( customer_id )
|
|
);
|
|
CREATE TABLE stripecoinpayments_invoice_project_records (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL,
|
|
storage double precision NOT NULL,
|
|
egress bigint NOT NULL,
|
|
objects bigint NOT NULL,
|
|
period_start timestamp with time zone NOT NULL,
|
|
period_end timestamp with time zone NOT NULL,
|
|
state integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( project_id, period_start, period_end )
|
|
);
|
|
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
|
tx_id text NOT NULL,
|
|
rate bytea NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( tx_id )
|
|
);
|
|
CREATE TABLE users (
|
|
id bytea NOT NULL,
|
|
email text NOT NULL,
|
|
normalized_email text NOT NULL,
|
|
full_name text NOT NULL,
|
|
short_name text,
|
|
password_hash bytea NOT NULL,
|
|
status integer NOT NULL,
|
|
partner_id bytea,
|
|
created_at timestamp with time zone NOT NULL,
|
|
project_limit integer NOT NULL DEFAULT 0,
|
|
position text,
|
|
company_name text,
|
|
company_size integer,
|
|
working_on text,
|
|
is_professional boolean NOT NULL DEFAULT false,
|
|
employee_count text,
|
|
PRIMARY KEY ( id )
|
|
);
|
|
CREATE TABLE value_attributions (
|
|
project_id bytea NOT NULL,
|
|
bucket_name bytea NOT NULL,
|
|
partner_id bytea NOT NULL,
|
|
last_updated timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( project_id, bucket_name )
|
|
);
|
|
CREATE TABLE api_keys (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
head bytea NOT NULL,
|
|
name text NOT NULL,
|
|
secret bytea NOT NULL,
|
|
partner_id bytea,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( head ),
|
|
UNIQUE ( name, project_id )
|
|
);
|
|
CREATE TABLE bucket_metainfos (
|
|
id bytea NOT NULL,
|
|
project_id bytea NOT NULL REFERENCES projects( id ),
|
|
name bytea NOT NULL,
|
|
partner_id bytea,
|
|
path_cipher integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
default_segment_size integer NOT NULL,
|
|
default_encryption_cipher_suite integer NOT NULL,
|
|
default_encryption_block_size integer NOT NULL,
|
|
default_redundancy_algorithm integer NOT NULL,
|
|
default_redundancy_share_size integer NOT NULL,
|
|
default_redundancy_required_shares integer NOT NULL,
|
|
default_redundancy_repair_shares integer NOT NULL,
|
|
default_redundancy_optimal_shares integer NOT NULL,
|
|
default_redundancy_total_shares integer NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( project_id, name )
|
|
);
|
|
CREATE TABLE project_members (
|
|
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( member_id, project_id )
|
|
);
|
|
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
|
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
|
state integer NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( tx_id )
|
|
);
|
|
CREATE TABLE user_credits (
|
|
id serial NOT NULL,
|
|
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
|
offer_id integer NOT NULL REFERENCES offers( id ),
|
|
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
|
type text NOT NULL,
|
|
credits_earned_in_cents integer NOT NULL,
|
|
credits_used_in_cents integer NOT NULL,
|
|
expires_at timestamp with time zone NOT NULL,
|
|
created_at timestamp with time zone NOT NULL,
|
|
PRIMARY KEY ( id ),
|
|
UNIQUE ( id, offer_id )
|
|
);
|
|
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
|
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
|
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
|
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
|
|
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
|
|
CREATE INDEX bucket_storage_tallies_project_id_interval_start_index ON bucket_storage_tallies ( project_id, interval_start );
|
|
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
|
|
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
|
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
|
|
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
|
|
CREATE INDEX node_last_ip ON nodes ( last_net );
|
|
CREATE INDEX nodes_dis_unk_off_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, offline_suspended, exit_finished_at, last_contact_success );
|
|
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
|
|
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
|
|
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
|
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
|
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
|
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );`
|
|
}
|
|
|
|
func (obj *pgxcockroachDB) wrapTx(tx tagsql.Tx) txMethods {
|
|
return &pgxcockroachTx{
|
|
dialectTx: dialectTx{tx: tx},
|
|
pgxcockroachImpl: &pgxcockroachImpl{
|
|
db: obj.db,
|
|
driver: tx,
|
|
txn: true,
|
|
},
|
|
}
|
|
}
|
|
|
|
// pgxcockroachTx is the transaction handle for the cockroach dialect:
// the embedded dialectTx supplies Commit/Rollback, and the embedded
// *pgxcockroachImpl supplies the generated query methods, routed through
// the transaction's driver.
type pgxcockroachTx struct {
	dialectTx
	*pgxcockroachImpl
}
|
|
|
|
func pgxcockroachLogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
type pretty []interface{}
|
|
|
|
func (p pretty) Format(f fmt.State, c rune) {
|
|
fmt.Fprint(f, "[")
|
|
nextval:
|
|
for i, val := range p {
|
|
if i > 0 {
|
|
fmt.Fprint(f, ", ")
|
|
}
|
|
rv := reflect.ValueOf(val)
|
|
if rv.Kind() == reflect.Ptr {
|
|
if rv.IsNil() {
|
|
fmt.Fprint(f, "NULL")
|
|
continue
|
|
}
|
|
val = rv.Elem().Interface()
|
|
}
|
|
switch v := val.(type) {
|
|
case string:
|
|
fmt.Fprintf(f, "%q", v)
|
|
case time.Time:
|
|
fmt.Fprintf(f, "%s", v.Format(time.RFC3339Nano))
|
|
case []byte:
|
|
for _, b := range v {
|
|
if !unicode.IsPrint(rune(b)) {
|
|
fmt.Fprintf(f, "%#x", v)
|
|
continue nextval
|
|
}
|
|
}
|
|
fmt.Fprintf(f, "%q", v)
|
|
default:
|
|
fmt.Fprintf(f, "%v", v)
|
|
}
|
|
}
|
|
fmt.Fprint(f, "]")
|
|
}
|
|
|
|
// AccountingRollup models a row of the accounting_rollups table.
type AccountingRollup struct {
	NodeId         []byte
	StartTime      time.Time
	PutTotal       int64
	GetTotal       int64
	GetAuditTotal  int64
	GetRepairTotal int64
	PutRepairTotal int64
	AtRestTotal    float64
}

// _Table names the backing SQL table.
func (AccountingRollup) _Table() string {
	return "accounting_rollups"
}

// AccountingRollup_Update_Fields is empty: no column of this table is updatable.
type AccountingRollup_Update_Fields struct{}

// AccountingRollup_NodeId_Field holds an optional value for the node_id column.
type AccountingRollup_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// AccountingRollup_NodeId wraps v as a set, non-null node_id value.
func AccountingRollup_NodeId(v []byte) AccountingRollup_NodeId_Field {
	return AccountingRollup_NodeId_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_NodeId_Field) _Column() string { return "node_id" }

// AccountingRollup_StartTime_Field holds an optional value for the start_time column.
type AccountingRollup_StartTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// AccountingRollup_StartTime wraps v as a set, non-null start_time value.
func AccountingRollup_StartTime(v time.Time) AccountingRollup_StartTime_Field {
	return AccountingRollup_StartTime_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_StartTime_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_StartTime_Field) _Column() string { return "start_time" }

// AccountingRollup_PutTotal_Field holds an optional value for the put_total column.
type AccountingRollup_PutTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_PutTotal wraps v as a set, non-null put_total value.
func AccountingRollup_PutTotal(v int64) AccountingRollup_PutTotal_Field {
	return AccountingRollup_PutTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_PutTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_PutTotal_Field) _Column() string { return "put_total" }

// AccountingRollup_GetTotal_Field holds an optional value for the get_total column.
type AccountingRollup_GetTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_GetTotal wraps v as a set, non-null get_total value.
func AccountingRollup_GetTotal(v int64) AccountingRollup_GetTotal_Field {
	return AccountingRollup_GetTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_GetTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetTotal_Field) _Column() string { return "get_total" }

// AccountingRollup_GetAuditTotal_Field holds an optional value for the get_audit_total column.
type AccountingRollup_GetAuditTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_GetAuditTotal wraps v as a set, non-null get_audit_total value.
func AccountingRollup_GetAuditTotal(v int64) AccountingRollup_GetAuditTotal_Field {
	return AccountingRollup_GetAuditTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_GetAuditTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetAuditTotal_Field) _Column() string { return "get_audit_total" }

// AccountingRollup_GetRepairTotal_Field holds an optional value for the get_repair_total column.
type AccountingRollup_GetRepairTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_GetRepairTotal wraps v as a set, non-null get_repair_total value.
func AccountingRollup_GetRepairTotal(v int64) AccountingRollup_GetRepairTotal_Field {
	return AccountingRollup_GetRepairTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_GetRepairTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetRepairTotal_Field) _Column() string { return "get_repair_total" }

// AccountingRollup_PutRepairTotal_Field holds an optional value for the put_repair_total column.
type AccountingRollup_PutRepairTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_PutRepairTotal wraps v as a set, non-null put_repair_total value.
func AccountingRollup_PutRepairTotal(v int64) AccountingRollup_PutRepairTotal_Field {
	return AccountingRollup_PutRepairTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_PutRepairTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_PutRepairTotal_Field) _Column() string { return "put_repair_total" }

// AccountingRollup_AtRestTotal_Field holds an optional value for the at_rest_total column.
type AccountingRollup_AtRestTotal_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// AccountingRollup_AtRestTotal wraps v as a set, non-null at_rest_total value.
func AccountingRollup_AtRestTotal(v float64) AccountingRollup_AtRestTotal_Field {
	return AccountingRollup_AtRestTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingRollup_AtRestTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_AtRestTotal_Field) _Column() string { return "at_rest_total" }
|
|
|
|
// AccountingTimestamps models a row of the accounting_timestamps table.
type AccountingTimestamps struct {
	Name  string
	Value time.Time
}

// _Table names the backing SQL table.
func (AccountingTimestamps) _Table() string {
	return "accounting_timestamps"
}

// AccountingTimestamps_Update_Fields selects which columns an update sets.
type AccountingTimestamps_Update_Fields struct {
	Value AccountingTimestamps_Value_Field
}

// AccountingTimestamps_Name_Field holds an optional value for the name column.
type AccountingTimestamps_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// AccountingTimestamps_Name wraps v as a set, non-null name value.
func AccountingTimestamps_Name(v string) AccountingTimestamps_Name_Field {
	return AccountingTimestamps_Name_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingTimestamps_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingTimestamps_Name_Field) _Column() string { return "name" }

// AccountingTimestamps_Value_Field holds an optional value for the value column.
type AccountingTimestamps_Value_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// AccountingTimestamps_Value wraps v as a set, non-null value column value.
func AccountingTimestamps_Value(v time.Time) AccountingTimestamps_Value_Field {
	return AccountingTimestamps_Value_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AccountingTimestamps_Value_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingTimestamps_Value_Field) _Column() string { return "value" }
|
|
|
|
// AuditHistory models a row of the audit_histories table.
type AuditHistory struct {
	NodeId  []byte
	History []byte
}

// _Table names the backing SQL table.
func (AuditHistory) _Table() string {
	return "audit_histories"
}

// AuditHistory_Update_Fields selects which columns an update sets.
type AuditHistory_Update_Fields struct {
	History AuditHistory_History_Field
}

// AuditHistory_NodeId_Field holds an optional value for the node_id column.
type AuditHistory_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// AuditHistory_NodeId wraps v as a set, non-null node_id value.
func AuditHistory_NodeId(v []byte) AuditHistory_NodeId_Field {
	return AuditHistory_NodeId_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AuditHistory_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AuditHistory_NodeId_Field) _Column() string { return "node_id" }

// AuditHistory_History_Field holds an optional value for the history column.
type AuditHistory_History_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// AuditHistory_History wraps v as a set, non-null history value.
func AuditHistory_History(v []byte) AuditHistory_History_Field {
	return AuditHistory_History_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f AuditHistory_History_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AuditHistory_History_Field) _Column() string { return "history" }
|
|
|
|
// BucketBandwidthRollup models a row of the bucket_bandwidth_rollups table.
type BucketBandwidthRollup struct {
	BucketName      []byte
	ProjectId       []byte
	IntervalStart   time.Time
	IntervalSeconds uint
	Action          uint
	Inline          uint64
	Allocated       uint64
	Settled         uint64
}

// _Table names the backing SQL table.
func (BucketBandwidthRollup) _Table() string {
	return "bucket_bandwidth_rollups"
}

// BucketBandwidthRollup_Update_Fields selects which columns an update sets.
type BucketBandwidthRollup_Update_Fields struct {
	Inline    BucketBandwidthRollup_Inline_Field
	Allocated BucketBandwidthRollup_Allocated_Field
	Settled   BucketBandwidthRollup_Settled_Field
}

// BucketBandwidthRollup_BucketName_Field holds an optional value for the bucket_name column.
type BucketBandwidthRollup_BucketName_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketBandwidthRollup_BucketName wraps v as a set, non-null bucket_name value.
func BucketBandwidthRollup_BucketName(v []byte) BucketBandwidthRollup_BucketName_Field {
	return BucketBandwidthRollup_BucketName_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_BucketName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_BucketName_Field) _Column() string { return "bucket_name" }

// BucketBandwidthRollup_ProjectId_Field holds an optional value for the project_id column.
type BucketBandwidthRollup_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketBandwidthRollup_ProjectId wraps v as a set, non-null project_id value.
func BucketBandwidthRollup_ProjectId(v []byte) BucketBandwidthRollup_ProjectId_Field {
	return BucketBandwidthRollup_ProjectId_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_ProjectId_Field) _Column() string { return "project_id" }

// BucketBandwidthRollup_IntervalStart_Field holds an optional value for the interval_start column.
type BucketBandwidthRollup_IntervalStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// BucketBandwidthRollup_IntervalStart wraps v as a set, non-null interval_start value.
func BucketBandwidthRollup_IntervalStart(v time.Time) BucketBandwidthRollup_IntervalStart_Field {
	return BucketBandwidthRollup_IntervalStart_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_IntervalStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_IntervalStart_Field) _Column() string { return "interval_start" }

// BucketBandwidthRollup_IntervalSeconds_Field holds an optional value for the interval_seconds column.
type BucketBandwidthRollup_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketBandwidthRollup_IntervalSeconds wraps v as a set, non-null interval_seconds value.
func BucketBandwidthRollup_IntervalSeconds(v uint) BucketBandwidthRollup_IntervalSeconds_Field {
	return BucketBandwidthRollup_IntervalSeconds_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }

// BucketBandwidthRollup_Action_Field holds an optional value for the action column.
type BucketBandwidthRollup_Action_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketBandwidthRollup_Action wraps v as a set, non-null action value.
func BucketBandwidthRollup_Action(v uint) BucketBandwidthRollup_Action_Field {
	return BucketBandwidthRollup_Action_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_Action_Field) _Column() string { return "action" }

// BucketBandwidthRollup_Inline_Field holds an optional value for the inline column.
type BucketBandwidthRollup_Inline_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollup_Inline wraps v as a set, non-null inline value.
func BucketBandwidthRollup_Inline(v uint64) BucketBandwidthRollup_Inline_Field {
	return BucketBandwidthRollup_Inline_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_Inline_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_Inline_Field) _Column() string { return "inline" }

// BucketBandwidthRollup_Allocated_Field holds an optional value for the allocated column.
type BucketBandwidthRollup_Allocated_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollup_Allocated wraps v as a set, non-null allocated value.
func BucketBandwidthRollup_Allocated(v uint64) BucketBandwidthRollup_Allocated_Field {
	return BucketBandwidthRollup_Allocated_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_Allocated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_Allocated_Field) _Column() string { return "allocated" }

// BucketBandwidthRollup_Settled_Field holds an optional value for the settled column.
type BucketBandwidthRollup_Settled_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollup_Settled wraps v as a set, non-null settled value.
func BucketBandwidthRollup_Settled(v uint64) BucketBandwidthRollup_Settled_Field {
	return BucketBandwidthRollup_Settled_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollup_Settled_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollup_Settled_Field) _Column() string { return "settled" }
|
|
|
|
// BucketBandwidthRollupArchive models a row of the bucket_bandwidth_rollup_archives table.
type BucketBandwidthRollupArchive struct {
	BucketName      []byte
	ProjectId       []byte
	IntervalStart   time.Time
	IntervalSeconds uint
	Action          uint
	Inline          uint64
	Allocated       uint64
	Settled         uint64
}

// _Table names the backing SQL table.
func (BucketBandwidthRollupArchive) _Table() string {
	return "bucket_bandwidth_rollup_archives"
}

// BucketBandwidthRollupArchive_Update_Fields selects which columns an update sets.
type BucketBandwidthRollupArchive_Update_Fields struct {
	Inline    BucketBandwidthRollupArchive_Inline_Field
	Allocated BucketBandwidthRollupArchive_Allocated_Field
	Settled   BucketBandwidthRollupArchive_Settled_Field
}

// BucketBandwidthRollupArchive_BucketName_Field holds an optional value for the bucket_name column.
type BucketBandwidthRollupArchive_BucketName_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketBandwidthRollupArchive_BucketName wraps v as a set, non-null bucket_name value.
func BucketBandwidthRollupArchive_BucketName(v []byte) BucketBandwidthRollupArchive_BucketName_Field {
	return BucketBandwidthRollupArchive_BucketName_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_BucketName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_BucketName_Field) _Column() string { return "bucket_name" }

// BucketBandwidthRollupArchive_ProjectId_Field holds an optional value for the project_id column.
type BucketBandwidthRollupArchive_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketBandwidthRollupArchive_ProjectId wraps v as a set, non-null project_id value.
func BucketBandwidthRollupArchive_ProjectId(v []byte) BucketBandwidthRollupArchive_ProjectId_Field {
	return BucketBandwidthRollupArchive_ProjectId_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_ProjectId_Field) _Column() string { return "project_id" }

// BucketBandwidthRollupArchive_IntervalStart_Field holds an optional value for the interval_start column.
type BucketBandwidthRollupArchive_IntervalStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// BucketBandwidthRollupArchive_IntervalStart wraps v as a set, non-null interval_start value.
func BucketBandwidthRollupArchive_IntervalStart(v time.Time) BucketBandwidthRollupArchive_IntervalStart_Field {
	return BucketBandwidthRollupArchive_IntervalStart_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_IntervalStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_IntervalStart_Field) _Column() string { return "interval_start" }

// BucketBandwidthRollupArchive_IntervalSeconds_Field holds an optional value for the interval_seconds column.
type BucketBandwidthRollupArchive_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketBandwidthRollupArchive_IntervalSeconds wraps v as a set, non-null interval_seconds value.
func BucketBandwidthRollupArchive_IntervalSeconds(v uint) BucketBandwidthRollupArchive_IntervalSeconds_Field {
	return BucketBandwidthRollupArchive_IntervalSeconds_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_IntervalSeconds_Field) _Column() string { return "interval_seconds" }

// BucketBandwidthRollupArchive_Action_Field holds an optional value for the action column.
type BucketBandwidthRollupArchive_Action_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketBandwidthRollupArchive_Action wraps v as a set, non-null action value.
func BucketBandwidthRollupArchive_Action(v uint) BucketBandwidthRollupArchive_Action_Field {
	return BucketBandwidthRollupArchive_Action_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_Action_Field) _Column() string { return "action" }

// BucketBandwidthRollupArchive_Inline_Field holds an optional value for the inline column.
type BucketBandwidthRollupArchive_Inline_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollupArchive_Inline wraps v as a set, non-null inline value.
func BucketBandwidthRollupArchive_Inline(v uint64) BucketBandwidthRollupArchive_Inline_Field {
	return BucketBandwidthRollupArchive_Inline_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_Inline_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_Inline_Field) _Column() string { return "inline" }

// BucketBandwidthRollupArchive_Allocated_Field holds an optional value for the allocated column.
type BucketBandwidthRollupArchive_Allocated_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollupArchive_Allocated wraps v as a set, non-null allocated value.
func BucketBandwidthRollupArchive_Allocated(v uint64) BucketBandwidthRollupArchive_Allocated_Field {
	return BucketBandwidthRollupArchive_Allocated_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_Allocated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_Allocated_Field) _Column() string { return "allocated" }

// BucketBandwidthRollupArchive_Settled_Field holds an optional value for the settled column.
type BucketBandwidthRollupArchive_Settled_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollupArchive_Settled wraps v as a set, non-null settled value.
func BucketBandwidthRollupArchive_Settled(v uint64) BucketBandwidthRollupArchive_Settled_Field {
	return BucketBandwidthRollupArchive_Settled_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketBandwidthRollupArchive_Settled_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketBandwidthRollupArchive_Settled_Field) _Column() string { return "settled" }
|
|
|
|
// BucketStorageTally models a row of the bucket_storage_tallies table.
type BucketStorageTally struct {
	BucketName          []byte
	ProjectId           []byte
	IntervalStart       time.Time
	Inline              uint64
	Remote              uint64
	RemoteSegmentsCount uint
	InlineSegmentsCount uint
	ObjectCount         uint
	MetadataSize        uint64
}

// _Table names the backing SQL table.
func (BucketStorageTally) _Table() string {
	return "bucket_storage_tallies"
}

// BucketStorageTally_Update_Fields is empty: no column of this table is updatable.
type BucketStorageTally_Update_Fields struct{}

// BucketStorageTally_BucketName_Field holds an optional value for the bucket_name column.
type BucketStorageTally_BucketName_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketStorageTally_BucketName wraps v as a set, non-null bucket_name value.
func BucketStorageTally_BucketName(v []byte) BucketStorageTally_BucketName_Field {
	return BucketStorageTally_BucketName_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketStorageTally_BucketName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketStorageTally_BucketName_Field) _Column() string { return "bucket_name" }

// BucketStorageTally_ProjectId_Field holds an optional value for the project_id column.
type BucketStorageTally_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketStorageTally_ProjectId wraps v as a set, non-null project_id value.
func BucketStorageTally_ProjectId(v []byte) BucketStorageTally_ProjectId_Field {
	return BucketStorageTally_ProjectId_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketStorageTally_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketStorageTally_ProjectId_Field) _Column() string { return "project_id" }

// BucketStorageTally_IntervalStart_Field holds an optional value for the interval_start column.
type BucketStorageTally_IntervalStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// BucketStorageTally_IntervalStart wraps v as a set, non-null interval_start value.
func BucketStorageTally_IntervalStart(v time.Time) BucketStorageTally_IntervalStart_Field {
	return BucketStorageTally_IntervalStart_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketStorageTally_IntervalStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketStorageTally_IntervalStart_Field) _Column() string { return "interval_start" }

// BucketStorageTally_Inline_Field holds an optional value for the inline column.
type BucketStorageTally_Inline_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketStorageTally_Inline wraps v as a set, non-null inline value.
func BucketStorageTally_Inline(v uint64) BucketStorageTally_Inline_Field {
	return BucketStorageTally_Inline_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketStorageTally_Inline_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketStorageTally_Inline_Field) _Column() string { return "inline" }

// BucketStorageTally_Remote_Field holds an optional value for the remote column.
type BucketStorageTally_Remote_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketStorageTally_Remote wraps v as a set, non-null remote value.
func BucketStorageTally_Remote(v uint64) BucketStorageTally_Remote_Field {
	return BucketStorageTally_Remote_Field{_value: v, _set: true}
}

// value returns the SQL argument: the wrapped value, or nil when unset or null.
func (f BucketStorageTally_Remote_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (BucketStorageTally_Remote_Field) _Column() string { return "remote" }

// BucketStorageTally_RemoteSegmentsCount_Field holds an optional value for the remote_segments_count column.
type BucketStorageTally_RemoteSegmentsCount_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketStorageTally_RemoteSegmentsCount wraps v as a set, non-null remote_segments_count value.
func BucketStorageTally_RemoteSegmentsCount(v uint) BucketStorageTally_RemoteSegmentsCount_Field {
	return BucketStorageTally_RemoteSegmentsCount_Field{_value: v, _set: true}
}
|
|
|
|
func (f BucketStorageTally_RemoteSegmentsCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_RemoteSegmentsCount_Field) _Column() string { return "remote_segments_count" }
|
|
|
|
type BucketStorageTally_InlineSegmentsCount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketStorageTally_InlineSegmentsCount(v uint) BucketStorageTally_InlineSegmentsCount_Field {
|
|
return BucketStorageTally_InlineSegmentsCount_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_InlineSegmentsCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_InlineSegmentsCount_Field) _Column() string { return "inline_segments_count" }
|
|
|
|
type BucketStorageTally_ObjectCount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func BucketStorageTally_ObjectCount(v uint) BucketStorageTally_ObjectCount_Field {
|
|
return BucketStorageTally_ObjectCount_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_ObjectCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_ObjectCount_Field) _Column() string { return "object_count" }
|
|
|
|
type BucketStorageTally_MetadataSize_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func BucketStorageTally_MetadataSize(v uint64) BucketStorageTally_MetadataSize_Field {
|
|
return BucketStorageTally_MetadataSize_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageTally_MetadataSize_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageTally_MetadataSize_Field) _Column() string { return "metadata_size" }
|
|
|
|
// CoinpaymentsTransaction mirrors one row of the coinpayments_transactions table.
type CoinpaymentsTransaction struct {
	Id        string
	UserId    []byte
	Address   string
	Amount    []byte
	Received  []byte
	Status    int
	Key       string
	Timeout   int
	CreatedAt time.Time
}

// _Table reports the database table this model maps to.
func (CoinpaymentsTransaction) _Table() string { return "coinpayments_transactions" }

// CoinpaymentsTransaction_Update_Fields lists the columns an update may set.
type CoinpaymentsTransaction_Update_Fields struct {
	Received CoinpaymentsTransaction_Received_Field
	Status   CoinpaymentsTransaction_Status_Field
}

// CoinpaymentsTransaction_Id_Field holds an "id" column value.
type CoinpaymentsTransaction_Id_Field struct {
	_set   bool
	_null  bool
	_value string
}

// CoinpaymentsTransaction_Id wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Id(v string) CoinpaymentsTransaction_Id_Field {
	return CoinpaymentsTransaction_Id_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Id_Field) _Column() string { return "id" }

// CoinpaymentsTransaction_UserId_Field holds a "user_id" column value.
type CoinpaymentsTransaction_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// CoinpaymentsTransaction_UserId wraps v as a set, non-null field value.
func CoinpaymentsTransaction_UserId(v []byte) CoinpaymentsTransaction_UserId_Field {
	return CoinpaymentsTransaction_UserId_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_UserId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_UserId_Field) _Column() string { return "user_id" }

// CoinpaymentsTransaction_Address_Field holds an "address" column value.
type CoinpaymentsTransaction_Address_Field struct {
	_set   bool
	_null  bool
	_value string
}

// CoinpaymentsTransaction_Address wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Address(v string) CoinpaymentsTransaction_Address_Field {
	return CoinpaymentsTransaction_Address_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Address_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Address_Field) _Column() string { return "address" }

// CoinpaymentsTransaction_Amount_Field holds an "amount" column value.
type CoinpaymentsTransaction_Amount_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// CoinpaymentsTransaction_Amount wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Amount(v []byte) CoinpaymentsTransaction_Amount_Field {
	return CoinpaymentsTransaction_Amount_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Amount_Field) _Column() string { return "amount" }

// CoinpaymentsTransaction_Received_Field holds a "received" column value.
type CoinpaymentsTransaction_Received_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// CoinpaymentsTransaction_Received wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Received(v []byte) CoinpaymentsTransaction_Received_Field {
	return CoinpaymentsTransaction_Received_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Received_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Received_Field) _Column() string { return "received" }

// CoinpaymentsTransaction_Status_Field holds a "status" column value.
type CoinpaymentsTransaction_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

// CoinpaymentsTransaction_Status wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Status(v int) CoinpaymentsTransaction_Status_Field {
	return CoinpaymentsTransaction_Status_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Status_Field) _Column() string { return "status" }

// CoinpaymentsTransaction_Key_Field holds a "key" column value.
type CoinpaymentsTransaction_Key_Field struct {
	_set   bool
	_null  bool
	_value string
}

// CoinpaymentsTransaction_Key wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Key(v string) CoinpaymentsTransaction_Key_Field {
	return CoinpaymentsTransaction_Key_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Key_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Key_Field) _Column() string { return "key" }

// CoinpaymentsTransaction_Timeout_Field holds a "timeout" column value.
type CoinpaymentsTransaction_Timeout_Field struct {
	_set   bool
	_null  bool
	_value int
}

// CoinpaymentsTransaction_Timeout wraps v as a set, non-null field value.
func CoinpaymentsTransaction_Timeout(v int) CoinpaymentsTransaction_Timeout_Field {
	return CoinpaymentsTransaction_Timeout_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_Timeout_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_Timeout_Field) _Column() string { return "timeout" }

// CoinpaymentsTransaction_CreatedAt_Field holds a "created_at" column value.
type CoinpaymentsTransaction_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// CoinpaymentsTransaction_CreatedAt wraps v as a set, non-null field value.
func CoinpaymentsTransaction_CreatedAt(v time.Time) CoinpaymentsTransaction_CreatedAt_Field {
	return CoinpaymentsTransaction_CreatedAt_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CoinpaymentsTransaction_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CoinpaymentsTransaction_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// Coupon mirrors one row of the coupons table. BillingPeriods and
// CouponCodeName map to nullable columns, hence the pointer types.
type Coupon struct {
	Id             []byte
	UserId         []byte
	Amount         int64
	Description    string
	Type           int
	Status         int
	Duration       int64
	BillingPeriods *int64
	CouponCodeName *string
	CreatedAt      time.Time
}

// _Table reports the database table this model maps to.
func (Coupon) _Table() string { return "coupons" }

// Coupon_Create_Fields lists the optional columns a create may set.
type Coupon_Create_Fields struct {
	BillingPeriods Coupon_BillingPeriods_Field
	CouponCodeName Coupon_CouponCodeName_Field
}

// Coupon_Update_Fields lists the columns an update may set.
type Coupon_Update_Fields struct {
	Status Coupon_Status_Field
}

// Coupon_Id_Field holds an "id" column value.
type Coupon_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Coupon_Id wraps v as a set, non-null field value.
func Coupon_Id(v []byte) Coupon_Id_Field {
	return Coupon_Id_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Id_Field) _Column() string { return "id" }

// Coupon_UserId_Field holds a "user_id" column value.
type Coupon_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Coupon_UserId wraps v as a set, non-null field value.
func Coupon_UserId(v []byte) Coupon_UserId_Field {
	return Coupon_UserId_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_UserId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_UserId_Field) _Column() string { return "user_id" }

// Coupon_Amount_Field holds an "amount" column value.
type Coupon_Amount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Coupon_Amount wraps v as a set, non-null field value.
func Coupon_Amount(v int64) Coupon_Amount_Field {
	return Coupon_Amount_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Amount_Field) _Column() string { return "amount" }

// Coupon_Description_Field holds a "description" column value.
type Coupon_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Coupon_Description wraps v as a set, non-null field value.
func Coupon_Description(v string) Coupon_Description_Field {
	return Coupon_Description_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_Description_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Description_Field) _Column() string { return "description" }

// Coupon_Type_Field holds a "type" column value.
type Coupon_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Coupon_Type wraps v as a set, non-null field value.
func Coupon_Type(v int) Coupon_Type_Field {
	return Coupon_Type_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_Type_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Type_Field) _Column() string { return "type" }

// Coupon_Status_Field holds a "status" column value.
type Coupon_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Coupon_Status wraps v as a set, non-null field value.
func Coupon_Status(v int) Coupon_Status_Field {
	return Coupon_Status_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Status_Field) _Column() string { return "status" }

// Coupon_Duration_Field holds a "duration" column value.
type Coupon_Duration_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Coupon_Duration wraps v as a set, non-null field value.
func Coupon_Duration(v int64) Coupon_Duration_Field {
	return Coupon_Duration_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_Duration_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_Duration_Field) _Column() string { return "duration" }

// Coupon_BillingPeriods_Field holds a nullable "billing_periods" column value.
type Coupon_BillingPeriods_Field struct {
	_set   bool
	_null  bool
	_value *int64
}

// Coupon_BillingPeriods wraps v as a set, non-null field value.
func Coupon_BillingPeriods(v int64) Coupon_BillingPeriods_Field {
	return Coupon_BillingPeriods_Field{_set: true, _value: &v}
}

// Coupon_BillingPeriods_Raw wraps *v, mapping a nil pointer to the null value.
func Coupon_BillingPeriods_Raw(v *int64) Coupon_BillingPeriods_Field {
	if v != nil {
		return Coupon_BillingPeriods(*v)
	}
	return Coupon_BillingPeriods_Null()
}

// Coupon_BillingPeriods_Null is the explicit SQL NULL for "billing_periods".
func Coupon_BillingPeriods_Null() Coupon_BillingPeriods_Field {
	return Coupon_BillingPeriods_Field{_set: true, _null: true}
}

func (f Coupon_BillingPeriods_Field) isnull() bool { return !f._set || f._null || f._value == nil }

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_BillingPeriods_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_BillingPeriods_Field) _Column() string { return "billing_periods" }

// Coupon_CouponCodeName_Field holds a nullable "coupon_code_name" column value.
type Coupon_CouponCodeName_Field struct {
	_set   bool
	_null  bool
	_value *string
}

// Coupon_CouponCodeName wraps v as a set, non-null field value.
func Coupon_CouponCodeName(v string) Coupon_CouponCodeName_Field {
	return Coupon_CouponCodeName_Field{_set: true, _value: &v}
}

// Coupon_CouponCodeName_Raw wraps *v, mapping a nil pointer to the null value.
func Coupon_CouponCodeName_Raw(v *string) Coupon_CouponCodeName_Field {
	if v != nil {
		return Coupon_CouponCodeName(*v)
	}
	return Coupon_CouponCodeName_Null()
}

// Coupon_CouponCodeName_Null is the explicit SQL NULL for "coupon_code_name".
func Coupon_CouponCodeName_Null() Coupon_CouponCodeName_Field {
	return Coupon_CouponCodeName_Field{_set: true, _null: true}
}

func (f Coupon_CouponCodeName_Field) isnull() bool { return !f._set || f._null || f._value == nil }

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_CouponCodeName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_CouponCodeName_Field) _Column() string { return "coupon_code_name" }

// Coupon_CreatedAt_Field holds a "created_at" column value.
type Coupon_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Coupon_CreatedAt wraps v as a set, non-null field value.
func Coupon_CreatedAt(v time.Time) Coupon_CreatedAt_Field {
	return Coupon_CreatedAt_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f Coupon_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Coupon_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// CouponCode mirrors one row of the coupon_codes table. BillingPeriods maps
// to a nullable column, hence the pointer type.
type CouponCode struct {
	Id             []byte
	Name           string
	Amount         int64
	Description    string
	Type           int
	BillingPeriods *int64
	CreatedAt      time.Time
}

// _Table reports the database table this model maps to.
func (CouponCode) _Table() string { return "coupon_codes" }

// CouponCode_Create_Fields lists the optional columns a create may set.
type CouponCode_Create_Fields struct {
	BillingPeriods CouponCode_BillingPeriods_Field
}

// CouponCode_Update_Fields is empty: no column of this table is updatable
// through dbx.
type CouponCode_Update_Fields struct {
}

// CouponCode_Id_Field holds an "id" column value.
type CouponCode_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// CouponCode_Id wraps v as a set, non-null field value.
func CouponCode_Id(v []byte) CouponCode_Id_Field {
	return CouponCode_Id_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_Id_Field) _Column() string { return "id" }

// CouponCode_Name_Field holds a "name" column value.
type CouponCode_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// CouponCode_Name wraps v as a set, non-null field value.
func CouponCode_Name(v string) CouponCode_Name_Field {
	return CouponCode_Name_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_Name_Field) _Column() string { return "name" }

// CouponCode_Amount_Field holds an "amount" column value.
type CouponCode_Amount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// CouponCode_Amount wraps v as a set, non-null field value.
func CouponCode_Amount(v int64) CouponCode_Amount_Field {
	return CouponCode_Amount_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_Amount_Field) _Column() string { return "amount" }

// CouponCode_Description_Field holds a "description" column value.
type CouponCode_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

// CouponCode_Description wraps v as a set, non-null field value.
func CouponCode_Description(v string) CouponCode_Description_Field {
	return CouponCode_Description_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_Description_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_Description_Field) _Column() string { return "description" }

// CouponCode_Type_Field holds a "type" column value.
type CouponCode_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

// CouponCode_Type wraps v as a set, non-null field value.
func CouponCode_Type(v int) CouponCode_Type_Field {
	return CouponCode_Type_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_Type_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_Type_Field) _Column() string { return "type" }

// CouponCode_BillingPeriods_Field holds a nullable "billing_periods" column value.
type CouponCode_BillingPeriods_Field struct {
	_set   bool
	_null  bool
	_value *int64
}

// CouponCode_BillingPeriods wraps v as a set, non-null field value.
func CouponCode_BillingPeriods(v int64) CouponCode_BillingPeriods_Field {
	return CouponCode_BillingPeriods_Field{_set: true, _value: &v}
}

// CouponCode_BillingPeriods_Raw wraps *v, mapping a nil pointer to the null value.
func CouponCode_BillingPeriods_Raw(v *int64) CouponCode_BillingPeriods_Field {
	if v != nil {
		return CouponCode_BillingPeriods(*v)
	}
	return CouponCode_BillingPeriods_Null()
}

// CouponCode_BillingPeriods_Null is the explicit SQL NULL for "billing_periods".
func CouponCode_BillingPeriods_Null() CouponCode_BillingPeriods_Field {
	return CouponCode_BillingPeriods_Field{_set: true, _null: true}
}

func (f CouponCode_BillingPeriods_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_BillingPeriods_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_BillingPeriods_Field) _Column() string { return "billing_periods" }

// CouponCode_CreatedAt_Field holds a "created_at" column value.
type CouponCode_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// CouponCode_CreatedAt wraps v as a set, non-null field value.
func CouponCode_CreatedAt(v time.Time) CouponCode_CreatedAt_Field {
	return CouponCode_CreatedAt_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f CouponCode_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (CouponCode_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type CouponUsage struct {
|
|
CouponId []byte
|
|
Amount int64
|
|
Status int
|
|
Period time.Time
|
|
}
|
|
|
|
func (CouponUsage) _Table() string { return "coupon_usages" }
|
|
|
|
type CouponUsage_Update_Fields struct {
|
|
Status CouponUsage_Status_Field
|
|
}
|
|
|
|
type CouponUsage_CouponId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func CouponUsage_CouponId(v []byte) CouponUsage_CouponId_Field {
|
|
return CouponUsage_CouponId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f CouponUsage_CouponId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (CouponUsage_CouponId_Field) _Column() string { return "coupon_id" }
|
|
|
|
type CouponUsage_Amount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func CouponUsage_Amount(v int64) CouponUsage_Amount_Field {
|
|
return CouponUsage_Amount_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f CouponUsage_Amount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (CouponUsage_Amount_Field) _Column() string { return "amount" }
|
|
|
|
type CouponUsage_Status_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func CouponUsage_Status(v int) CouponUsage_Status_Field {
|
|
return CouponUsage_Status_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f CouponUsage_Status_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (CouponUsage_Status_Field) _Column() string { return "status" }
|
|
|
|
type CouponUsage_Period_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func CouponUsage_Period(v time.Time) CouponUsage_Period_Field {
|
|
return CouponUsage_Period_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f CouponUsage_Period_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (CouponUsage_Period_Field) _Column() string { return "period" }
|
|
|
|
// GracefulExitProgress mirrors one row of the graceful_exit_progress table.
type GracefulExitProgress struct {
	NodeId            []byte
	BytesTransferred  int64
	PiecesTransferred int64
	PiecesFailed      int64
	UpdatedAt         time.Time
}

// _Table reports the database table this model maps to.
func (GracefulExitProgress) _Table() string { return "graceful_exit_progress" }

// GracefulExitProgress_Update_Fields lists the columns an update may set.
type GracefulExitProgress_Update_Fields struct {
	BytesTransferred  GracefulExitProgress_BytesTransferred_Field
	PiecesTransferred GracefulExitProgress_PiecesTransferred_Field
	PiecesFailed      GracefulExitProgress_PiecesFailed_Field
}

// GracefulExitProgress_NodeId_Field holds a "node_id" column value.
type GracefulExitProgress_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// GracefulExitProgress_NodeId wraps v as a set, non-null field value.
func GracefulExitProgress_NodeId(v []byte) GracefulExitProgress_NodeId_Field {
	return GracefulExitProgress_NodeId_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f GracefulExitProgress_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (GracefulExitProgress_NodeId_Field) _Column() string { return "node_id" }

// GracefulExitProgress_BytesTransferred_Field holds a "bytes_transferred"
// column value.
type GracefulExitProgress_BytesTransferred_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// GracefulExitProgress_BytesTransferred wraps v as a set, non-null field value.
func GracefulExitProgress_BytesTransferred(v int64) GracefulExitProgress_BytesTransferred_Field {
	return GracefulExitProgress_BytesTransferred_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f GracefulExitProgress_BytesTransferred_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (GracefulExitProgress_BytesTransferred_Field) _Column() string { return "bytes_transferred" }

// GracefulExitProgress_PiecesTransferred_Field holds a "pieces_transferred"
// column value.
type GracefulExitProgress_PiecesTransferred_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// GracefulExitProgress_PiecesTransferred wraps v as a set, non-null field value.
func GracefulExitProgress_PiecesTransferred(v int64) GracefulExitProgress_PiecesTransferred_Field {
	return GracefulExitProgress_PiecesTransferred_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f GracefulExitProgress_PiecesTransferred_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (GracefulExitProgress_PiecesTransferred_Field) _Column() string { return "pieces_transferred" }

// GracefulExitProgress_PiecesFailed_Field holds a "pieces_failed" column value.
type GracefulExitProgress_PiecesFailed_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// GracefulExitProgress_PiecesFailed wraps v as a set, non-null field value.
func GracefulExitProgress_PiecesFailed(v int64) GracefulExitProgress_PiecesFailed_Field {
	return GracefulExitProgress_PiecesFailed_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f GracefulExitProgress_PiecesFailed_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (GracefulExitProgress_PiecesFailed_Field) _Column() string { return "pieces_failed" }

// GracefulExitProgress_UpdatedAt_Field holds an "updated_at" column value.
type GracefulExitProgress_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// GracefulExitProgress_UpdatedAt wraps v as a set, non-null field value.
func GracefulExitProgress_UpdatedAt(v time.Time) GracefulExitProgress_UpdatedAt_Field {
	return GracefulExitProgress_UpdatedAt_Field{_set: true, _value: v}
}

// value yields the SQL argument: the wrapped value when set, else nil.
func (f GracefulExitProgress_UpdatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (GracefulExitProgress_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
type GracefulExitTransferQueue struct {
|
|
NodeId []byte
|
|
Path []byte
|
|
PieceNum int
|
|
RootPieceId []byte
|
|
DurabilityRatio float64
|
|
QueuedAt time.Time
|
|
RequestedAt *time.Time
|
|
LastFailedAt *time.Time
|
|
LastFailedCode *int
|
|
FailedCount *int
|
|
FinishedAt *time.Time
|
|
OrderLimitSendCount int
|
|
}
|
|
|
|
func (GracefulExitTransferQueue) _Table() string { return "graceful_exit_transfer_queue" }
|
|
|
|
type GracefulExitTransferQueue_Create_Fields struct {
|
|
RootPieceId GracefulExitTransferQueue_RootPieceId_Field
|
|
RequestedAt GracefulExitTransferQueue_RequestedAt_Field
|
|
LastFailedAt GracefulExitTransferQueue_LastFailedAt_Field
|
|
LastFailedCode GracefulExitTransferQueue_LastFailedCode_Field
|
|
FailedCount GracefulExitTransferQueue_FailedCount_Field
|
|
FinishedAt GracefulExitTransferQueue_FinishedAt_Field
|
|
OrderLimitSendCount GracefulExitTransferQueue_OrderLimitSendCount_Field
|
|
}
|
|
|
|
type GracefulExitTransferQueue_Update_Fields struct {
|
|
DurabilityRatio GracefulExitTransferQueue_DurabilityRatio_Field
|
|
RequestedAt GracefulExitTransferQueue_RequestedAt_Field
|
|
LastFailedAt GracefulExitTransferQueue_LastFailedAt_Field
|
|
LastFailedCode GracefulExitTransferQueue_LastFailedCode_Field
|
|
FailedCount GracefulExitTransferQueue_FailedCount_Field
|
|
FinishedAt GracefulExitTransferQueue_FinishedAt_Field
|
|
OrderLimitSendCount GracefulExitTransferQueue_OrderLimitSendCount_Field
|
|
}
|
|
|
|
type GracefulExitTransferQueue_NodeId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func GracefulExitTransferQueue_NodeId(v []byte) GracefulExitTransferQueue_NodeId_Field {
|
|
return GracefulExitTransferQueue_NodeId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_NodeId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_NodeId_Field) _Column() string { return "node_id" }
|
|
|
|
type GracefulExitTransferQueue_Path_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func GracefulExitTransferQueue_Path(v []byte) GracefulExitTransferQueue_Path_Field {
|
|
return GracefulExitTransferQueue_Path_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_Path_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_Path_Field) _Column() string { return "path" }
|
|
|
|
type GracefulExitTransferQueue_PieceNum_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func GracefulExitTransferQueue_PieceNum(v int) GracefulExitTransferQueue_PieceNum_Field {
|
|
return GracefulExitTransferQueue_PieceNum_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_PieceNum_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_PieceNum_Field) _Column() string { return "piece_num" }
|
|
|
|
type GracefulExitTransferQueue_RootPieceId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RootPieceId(v []byte) GracefulExitTransferQueue_RootPieceId_Field {
|
|
return GracefulExitTransferQueue_RootPieceId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RootPieceId_Raw(v []byte) GracefulExitTransferQueue_RootPieceId_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_RootPieceId_Null()
|
|
}
|
|
return GracefulExitTransferQueue_RootPieceId(v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RootPieceId_Null() GracefulExitTransferQueue_RootPieceId_Field {
|
|
return GracefulExitTransferQueue_RootPieceId_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_RootPieceId_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_RootPieceId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_RootPieceId_Field) _Column() string { return "root_piece_id" }
|
|
|
|
type GracefulExitTransferQueue_DurabilityRatio_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value float64
|
|
}
|
|
|
|
func GracefulExitTransferQueue_DurabilityRatio(v float64) GracefulExitTransferQueue_DurabilityRatio_Field {
|
|
return GracefulExitTransferQueue_DurabilityRatio_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_DurabilityRatio_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_DurabilityRatio_Field) _Column() string { return "durability_ratio" }
|
|
|
|
type GracefulExitTransferQueue_QueuedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_QueuedAt(v time.Time) GracefulExitTransferQueue_QueuedAt_Field {
|
|
return GracefulExitTransferQueue_QueuedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_QueuedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_QueuedAt_Field) _Column() string { return "queued_at" }
|
|
|
|
type GracefulExitTransferQueue_RequestedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RequestedAt(v time.Time) GracefulExitTransferQueue_RequestedAt_Field {
|
|
return GracefulExitTransferQueue_RequestedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RequestedAt_Raw(v *time.Time) GracefulExitTransferQueue_RequestedAt_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_RequestedAt_Null()
|
|
}
|
|
return GracefulExitTransferQueue_RequestedAt(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_RequestedAt_Null() GracefulExitTransferQueue_RequestedAt_Field {
|
|
return GracefulExitTransferQueue_RequestedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_RequestedAt_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_RequestedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_RequestedAt_Field) _Column() string { return "requested_at" }
|
|
|
|
type GracefulExitTransferQueue_LastFailedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedAt(v time.Time) GracefulExitTransferQueue_LastFailedAt_Field {
|
|
return GracefulExitTransferQueue_LastFailedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedAt_Raw(v *time.Time) GracefulExitTransferQueue_LastFailedAt_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_LastFailedAt_Null()
|
|
}
|
|
return GracefulExitTransferQueue_LastFailedAt(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedAt_Null() GracefulExitTransferQueue_LastFailedAt_Field {
|
|
return GracefulExitTransferQueue_LastFailedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_LastFailedAt_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_LastFailedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_LastFailedAt_Field) _Column() string { return "last_failed_at" }
|
|
|
|
type GracefulExitTransferQueue_LastFailedCode_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *int
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedCode(v int) GracefulExitTransferQueue_LastFailedCode_Field {
|
|
return GracefulExitTransferQueue_LastFailedCode_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedCode_Raw(v *int) GracefulExitTransferQueue_LastFailedCode_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_LastFailedCode_Null()
|
|
}
|
|
return GracefulExitTransferQueue_LastFailedCode(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_LastFailedCode_Null() GracefulExitTransferQueue_LastFailedCode_Field {
|
|
return GracefulExitTransferQueue_LastFailedCode_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_LastFailedCode_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_LastFailedCode_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_LastFailedCode_Field) _Column() string { return "last_failed_code" }
|
|
|
|
type GracefulExitTransferQueue_FailedCount_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *int
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FailedCount(v int) GracefulExitTransferQueue_FailedCount_Field {
|
|
return GracefulExitTransferQueue_FailedCount_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FailedCount_Raw(v *int) GracefulExitTransferQueue_FailedCount_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_FailedCount_Null()
|
|
}
|
|
return GracefulExitTransferQueue_FailedCount(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FailedCount_Null() GracefulExitTransferQueue_FailedCount_Field {
|
|
return GracefulExitTransferQueue_FailedCount_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_FailedCount_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_FailedCount_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_FailedCount_Field) _Column() string { return "failed_count" }
|
|
|
|
type GracefulExitTransferQueue_FinishedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FinishedAt(v time.Time) GracefulExitTransferQueue_FinishedAt_Field {
|
|
return GracefulExitTransferQueue_FinishedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FinishedAt_Raw(v *time.Time) GracefulExitTransferQueue_FinishedAt_Field {
|
|
if v == nil {
|
|
return GracefulExitTransferQueue_FinishedAt_Null()
|
|
}
|
|
return GracefulExitTransferQueue_FinishedAt(*v)
|
|
}
|
|
|
|
func GracefulExitTransferQueue_FinishedAt_Null() GracefulExitTransferQueue_FinishedAt_Field {
|
|
return GracefulExitTransferQueue_FinishedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_FinishedAt_Field) isnull() bool {
|
|
return !f._set || f._null || f._value == nil
|
|
}
|
|
|
|
func (f GracefulExitTransferQueue_FinishedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (GracefulExitTransferQueue_FinishedAt_Field) _Column() string { return "finished_at" }
|
|
|
|
// GracefulExitTransferQueue_OrderLimitSendCount_Field wraps a value for the
// non-nullable "order_limit_send_count" column.
type GracefulExitTransferQueue_OrderLimitSendCount_Field struct {
	_set   bool
	_null  bool
	_value int
}

// GracefulExitTransferQueue_OrderLimitSendCount wraps v as a set field value.
func GracefulExitTransferQueue_OrderLimitSendCount(v int) GracefulExitTransferQueue_OrderLimitSendCount_Field {
	return GracefulExitTransferQueue_OrderLimitSendCount_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f GracefulExitTransferQueue_OrderLimitSendCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (GracefulExitTransferQueue_OrderLimitSendCount_Field) _Column() string {
	return "order_limit_send_count"
}
|
|
|
|
// Injuredsegment mirrors one row of the "injuredsegments" table.
// Pointer-typed fields correspond to nullable columns.
type Injuredsegment struct {
	Path          []byte
	Data          []byte
	Attempted     *time.Time
	UpdatedAt     time.Time
	SegmentHealth float64
}

// _Table returns the SQL table name for Injuredsegment rows.
func (Injuredsegment) _Table() string { return "injuredsegments" }

// Injuredsegment_Create_Fields groups the optional field values that may be
// supplied when creating a row (dbx Create_Fields convention).
type Injuredsegment_Create_Fields struct {
	Attempted     Injuredsegment_Attempted_Field
	UpdatedAt     Injuredsegment_UpdatedAt_Field
	SegmentHealth Injuredsegment_SegmentHealth_Field
}

// Injuredsegment_Update_Fields groups the columns that may be changed by an
// update (dbx Update_Fields convention).
type Injuredsegment_Update_Fields struct {
	Attempted Injuredsegment_Attempted_Field
	UpdatedAt Injuredsegment_UpdatedAt_Field
}
|
|
|
|
// Injuredsegment_Path_Field wraps a value for the "path" column.
type Injuredsegment_Path_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Injuredsegment_Path wraps v as a set field value.
func Injuredsegment_Path(v []byte) Injuredsegment_Path_Field {
	return Injuredsegment_Path_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Injuredsegment_Path_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Injuredsegment_Path_Field) _Column() string { return "path" }

// Injuredsegment_Data_Field wraps a value for the "data" column.
type Injuredsegment_Data_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Injuredsegment_Data wraps v as a set field value.
func Injuredsegment_Data(v []byte) Injuredsegment_Data_Field {
	return Injuredsegment_Data_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Injuredsegment_Data_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Injuredsegment_Data_Field) _Column() string { return "data" }
|
|
|
|
type Injuredsegment_Attempted_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Injuredsegment_Attempted(v time.Time) Injuredsegment_Attempted_Field {
|
|
return Injuredsegment_Attempted_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Injuredsegment_Attempted_Raw(v *time.Time) Injuredsegment_Attempted_Field {
|
|
if v == nil {
|
|
return Injuredsegment_Attempted_Null()
|
|
}
|
|
return Injuredsegment_Attempted(*v)
|
|
}
|
|
|
|
func Injuredsegment_Attempted_Null() Injuredsegment_Attempted_Field {
|
|
return Injuredsegment_Attempted_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Injuredsegment_Attempted_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Injuredsegment_Attempted_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Injuredsegment_Attempted_Field) _Column() string { return "attempted" }
|
|
|
|
type Injuredsegment_UpdatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func Injuredsegment_UpdatedAt(v time.Time) Injuredsegment_UpdatedAt_Field {
|
|
return Injuredsegment_UpdatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Injuredsegment_UpdatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Injuredsegment_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
type Injuredsegment_SegmentHealth_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value float64
|
|
}
|
|
|
|
func Injuredsegment_SegmentHealth(v float64) Injuredsegment_SegmentHealth_Field {
|
|
return Injuredsegment_SegmentHealth_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Injuredsegment_SegmentHealth_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Injuredsegment_SegmentHealth_Field) _Column() string { return "segment_health" }
|
|
|
|
// Irreparabledb mirrors one row of the "irreparabledbs" table.
type Irreparabledb struct {
	Segmentpath        []byte
	Segmentdetail      []byte
	PiecesLostCount    int64
	SegDamagedUnixSec  int64
	RepairAttemptCount int64
}

// _Table returns the SQL table name for Irreparabledb rows.
func (Irreparabledb) _Table() string { return "irreparabledbs" }

// Irreparabledb_Update_Fields groups the columns that may be changed by an
// update (dbx Update_Fields convention).
type Irreparabledb_Update_Fields struct {
	Segmentdetail      Irreparabledb_Segmentdetail_Field
	PiecesLostCount    Irreparabledb_PiecesLostCount_Field
	SegDamagedUnixSec  Irreparabledb_SegDamagedUnixSec_Field
	RepairAttemptCount Irreparabledb_RepairAttemptCount_Field
}
|
|
|
|
// Irreparabledb_Segmentpath_Field wraps a value for the "segmentpath" column.
type Irreparabledb_Segmentpath_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Irreparabledb_Segmentpath wraps v as a set field value.
func Irreparabledb_Segmentpath(v []byte) Irreparabledb_Segmentpath_Field {
	return Irreparabledb_Segmentpath_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Irreparabledb_Segmentpath_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Irreparabledb_Segmentpath_Field) _Column() string { return "segmentpath" }

// Irreparabledb_Segmentdetail_Field wraps a value for the "segmentdetail" column.
type Irreparabledb_Segmentdetail_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Irreparabledb_Segmentdetail wraps v as a set field value.
func Irreparabledb_Segmentdetail(v []byte) Irreparabledb_Segmentdetail_Field {
	return Irreparabledb_Segmentdetail_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Irreparabledb_Segmentdetail_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Irreparabledb_Segmentdetail_Field) _Column() string { return "segmentdetail" }
|
|
|
|
// Irreparabledb_PiecesLostCount_Field wraps a value for the "pieces_lost_count" column.
type Irreparabledb_PiecesLostCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_PiecesLostCount wraps v as a set field value.
func Irreparabledb_PiecesLostCount(v int64) Irreparabledb_PiecesLostCount_Field {
	return Irreparabledb_PiecesLostCount_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Irreparabledb_PiecesLostCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Irreparabledb_PiecesLostCount_Field) _Column() string { return "pieces_lost_count" }

// Irreparabledb_SegDamagedUnixSec_Field wraps a value for the "seg_damaged_unix_sec" column.
type Irreparabledb_SegDamagedUnixSec_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_SegDamagedUnixSec wraps v as a set field value.
func Irreparabledb_SegDamagedUnixSec(v int64) Irreparabledb_SegDamagedUnixSec_Field {
	return Irreparabledb_SegDamagedUnixSec_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Irreparabledb_SegDamagedUnixSec_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Irreparabledb_SegDamagedUnixSec_Field) _Column() string { return "seg_damaged_unix_sec" }

// Irreparabledb_RepairAttemptCount_Field wraps a value for the "repair_attempt_count" column.
type Irreparabledb_RepairAttemptCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_RepairAttemptCount wraps v as a set field value.
func Irreparabledb_RepairAttemptCount(v int64) Irreparabledb_RepairAttemptCount_Field {
	return Irreparabledb_RepairAttemptCount_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Irreparabledb_RepairAttemptCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Irreparabledb_RepairAttemptCount_Field) _Column() string { return "repair_attempt_count" }
|
|
|
|
// Node mirrors one row of the "nodes" table. Pointer-typed fields correspond
// to nullable columns.
type Node struct {
	Id                          []byte
	Address                     string
	LastNet                     string
	LastIpPort                  *string
	Protocol                    int
	Type                        int
	Email                       string
	Wallet                      string
	WalletFeatures              string
	FreeDisk                    int64
	PieceCount                  int64
	Major                       int64
	Minor                       int64
	Patch                       int64
	Hash                        string
	Timestamp                   time.Time
	Release                     bool
	Latency90                   int64
	AuditSuccessCount           int64
	TotalAuditCount             int64
	VettedAt                    *time.Time
	CreatedAt                   time.Time
	UpdatedAt                   time.Time
	LastContactSuccess          time.Time
	LastContactFailure          time.Time
	Contained                   bool
	Disqualified                *time.Time
	Suspended                   *time.Time
	UnknownAuditSuspended       *time.Time
	OfflineSuspended            *time.Time
	UnderReview                 *time.Time
	OnlineScore                 float64
	AuditReputationAlpha        float64
	AuditReputationBeta         float64
	UnknownAuditReputationAlpha float64
	UnknownAuditReputationBeta  float64
	ExitInitiatedAt             *time.Time
	ExitLoopCompletedAt         *time.Time
	ExitFinishedAt              *time.Time
	ExitSuccess                 bool
}

// _Table returns the SQL table name for Node rows.
func (Node) _Table() string { return "nodes" }

// Node_Create_Fields groups the optional field values that may be supplied
// when creating a row (dbx Create_Fields convention).
type Node_Create_Fields struct {
	Address                     Node_Address_Field
	LastIpPort                  Node_LastIpPort_Field
	Protocol                    Node_Protocol_Field
	Type                        Node_Type_Field
	WalletFeatures              Node_WalletFeatures_Field
	FreeDisk                    Node_FreeDisk_Field
	Major                       Node_Major_Field
	Minor                       Node_Minor_Field
	Patch                       Node_Patch_Field
	Hash                        Node_Hash_Field
	Timestamp                   Node_Timestamp_Field
	Release                     Node_Release_Field
	Latency90                   Node_Latency90_Field
	AuditSuccessCount           Node_AuditSuccessCount_Field
	TotalAuditCount             Node_TotalAuditCount_Field
	VettedAt                    Node_VettedAt_Field
	LastContactSuccess          Node_LastContactSuccess_Field
	LastContactFailure          Node_LastContactFailure_Field
	Contained                   Node_Contained_Field
	Disqualified                Node_Disqualified_Field
	Suspended                   Node_Suspended_Field
	UnknownAuditSuspended       Node_UnknownAuditSuspended_Field
	OfflineSuspended            Node_OfflineSuspended_Field
	UnderReview                 Node_UnderReview_Field
	OnlineScore                 Node_OnlineScore_Field
	AuditReputationAlpha        Node_AuditReputationAlpha_Field
	AuditReputationBeta         Node_AuditReputationBeta_Field
	UnknownAuditReputationAlpha Node_UnknownAuditReputationAlpha_Field
	UnknownAuditReputationBeta  Node_UnknownAuditReputationBeta_Field
	ExitInitiatedAt             Node_ExitInitiatedAt_Field
	ExitLoopCompletedAt         Node_ExitLoopCompletedAt_Field
	ExitFinishedAt              Node_ExitFinishedAt_Field
	ExitSuccess                 Node_ExitSuccess_Field
}

// Node_Update_Fields groups the columns that may be changed by an update
// (dbx Update_Fields convention).
type Node_Update_Fields struct {
	Address                     Node_Address_Field
	LastNet                     Node_LastNet_Field
	LastIpPort                  Node_LastIpPort_Field
	Protocol                    Node_Protocol_Field
	Type                        Node_Type_Field
	Email                       Node_Email_Field
	Wallet                      Node_Wallet_Field
	WalletFeatures              Node_WalletFeatures_Field
	FreeDisk                    Node_FreeDisk_Field
	PieceCount                  Node_PieceCount_Field
	Major                       Node_Major_Field
	Minor                       Node_Minor_Field
	Patch                       Node_Patch_Field
	Hash                        Node_Hash_Field
	Timestamp                   Node_Timestamp_Field
	Release                     Node_Release_Field
	Latency90                   Node_Latency90_Field
	AuditSuccessCount           Node_AuditSuccessCount_Field
	TotalAuditCount             Node_TotalAuditCount_Field
	VettedAt                    Node_VettedAt_Field
	LastContactSuccess          Node_LastContactSuccess_Field
	LastContactFailure          Node_LastContactFailure_Field
	Contained                   Node_Contained_Field
	Disqualified                Node_Disqualified_Field
	Suspended                   Node_Suspended_Field
	UnknownAuditSuspended       Node_UnknownAuditSuspended_Field
	OfflineSuspended            Node_OfflineSuspended_Field
	UnderReview                 Node_UnderReview_Field
	OnlineScore                 Node_OnlineScore_Field
	AuditReputationAlpha        Node_AuditReputationAlpha_Field
	AuditReputationBeta         Node_AuditReputationBeta_Field
	UnknownAuditReputationAlpha Node_UnknownAuditReputationAlpha_Field
	UnknownAuditReputationBeta  Node_UnknownAuditReputationBeta_Field
	ExitInitiatedAt             Node_ExitInitiatedAt_Field
	ExitLoopCompletedAt         Node_ExitLoopCompletedAt_Field
	ExitFinishedAt              Node_ExitFinishedAt_Field
	ExitSuccess                 Node_ExitSuccess_Field
}
|
|
|
|
// Node_Id_Field wraps a value for the "id" column of the nodes table.
type Node_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Node_Id wraps v as a set field value.
func Node_Id(v []byte) Node_Id_Field {
	return Node_Id_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Id_Field) _Column() string { return "id" }

// Node_Address_Field wraps a value for the "address" column.
type Node_Address_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Address wraps v as a set field value.
func Node_Address(v string) Node_Address_Field {
	return Node_Address_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Address_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Address_Field) _Column() string { return "address" }

// Node_LastNet_Field wraps a value for the "last_net" column.
type Node_LastNet_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_LastNet wraps v as a set field value.
func Node_LastNet(v string) Node_LastNet_Field {
	return Node_LastNet_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_LastNet_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_LastNet_Field) _Column() string { return "last_net" }
|
|
|
|
// Node_LastIpPort_Field wraps a nullable value for the "last_ip_port" column.
type Node_LastIpPort_Field struct {
	_set   bool
	_null  bool
	_value *string
}

// Node_LastIpPort wraps v as a set, non-null field value.
func Node_LastIpPort(v string) Node_LastIpPort_Field {
	return Node_LastIpPort_Field{_set: true, _value: &v}
}

// Node_LastIpPort_Raw maps a nil pointer to the null field value, otherwise
// wraps the pointed-to value.
func Node_LastIpPort_Raw(v *string) Node_LastIpPort_Field {
	if v != nil {
		return Node_LastIpPort(*v)
	}
	return Node_LastIpPort_Null()
}

// Node_LastIpPort_Null returns an explicitly null field value.
func Node_LastIpPort_Null() Node_LastIpPort_Field {
	return Node_LastIpPort_Field{_set: true, _null: true}
}

// isnull reports whether the field carries no usable value.
func (f Node_LastIpPort_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

// value returns the wrapped value as interface{}, or nil when unset or null.
func (f Node_LastIpPort_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_LastIpPort_Field) _Column() string { return "last_ip_port" }
|
|
|
|
// Node_Protocol_Field wraps a value for the "protocol" column.
type Node_Protocol_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Node_Protocol wraps v as a set field value.
func Node_Protocol(v int) Node_Protocol_Field {
	return Node_Protocol_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Protocol_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Protocol_Field) _Column() string { return "protocol" }

// Node_Type_Field wraps a value for the "type" column.
type Node_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

// Node_Type wraps v as a set field value.
func Node_Type(v int) Node_Type_Field {
	return Node_Type_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Type_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Type_Field) _Column() string { return "type" }
|
|
|
|
// Node_Email_Field wraps a value for the "email" column.
type Node_Email_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Email wraps v as a set field value.
func Node_Email(v string) Node_Email_Field {
	return Node_Email_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Email_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Email_Field) _Column() string { return "email" }

// Node_Wallet_Field wraps a value for the "wallet" column.
type Node_Wallet_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Wallet wraps v as a set field value.
func Node_Wallet(v string) Node_Wallet_Field {
	return Node_Wallet_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Wallet_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Wallet_Field) _Column() string { return "wallet" }

// Node_WalletFeatures_Field wraps a value for the "wallet_features" column.
type Node_WalletFeatures_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_WalletFeatures wraps v as a set field value.
func Node_WalletFeatures(v string) Node_WalletFeatures_Field {
	return Node_WalletFeatures_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_WalletFeatures_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_WalletFeatures_Field) _Column() string { return "wallet_features" }
|
|
|
|
// Node_FreeDisk_Field wraps a value for the "free_disk" column.
type Node_FreeDisk_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_FreeDisk wraps v as a set field value.
func Node_FreeDisk(v int64) Node_FreeDisk_Field {
	return Node_FreeDisk_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_FreeDisk_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_FreeDisk_Field) _Column() string { return "free_disk" }

// Node_PieceCount_Field wraps a value for the "piece_count" column.
type Node_PieceCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_PieceCount wraps v as a set field value.
func Node_PieceCount(v int64) Node_PieceCount_Field {
	return Node_PieceCount_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_PieceCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_PieceCount_Field) _Column() string { return "piece_count" }
|
|
|
|
// Node_Major_Field wraps a value for the "major" column.
type Node_Major_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Major wraps v as a set field value.
func Node_Major(v int64) Node_Major_Field {
	return Node_Major_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Major_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Major_Field) _Column() string { return "major" }

// Node_Minor_Field wraps a value for the "minor" column.
type Node_Minor_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Minor wraps v as a set field value.
func Node_Minor(v int64) Node_Minor_Field {
	return Node_Minor_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Minor_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Minor_Field) _Column() string { return "minor" }

// Node_Patch_Field wraps a value for the "patch" column.
type Node_Patch_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Patch wraps v as a set field value.
func Node_Patch(v int64) Node_Patch_Field {
	return Node_Patch_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Patch_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Patch_Field) _Column() string { return "patch" }

// Node_Hash_Field wraps a value for the "hash" column.
type Node_Hash_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Hash wraps v as a set field value.
func Node_Hash(v string) Node_Hash_Field {
	return Node_Hash_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Hash_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Hash_Field) _Column() string { return "hash" }
|
|
|
|
type Node_Timestamp_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func Node_Timestamp(v time.Time) Node_Timestamp_Field {
|
|
return Node_Timestamp_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_Timestamp_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_Timestamp_Field) _Column() string { return "timestamp" }
|
|
|
|
type Node_Release_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value bool
|
|
}
|
|
|
|
func Node_Release(v bool) Node_Release_Field {
|
|
return Node_Release_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_Release_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_Release_Field) _Column() string { return "release" }
|
|
|
|
// Node_Latency90_Field wraps a value for the "latency_90" column.
type Node_Latency90_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_Latency90 wraps v as a set field value.
func Node_Latency90(v int64) Node_Latency90_Field {
	return Node_Latency90_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_Latency90_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_Latency90_Field) _Column() string { return "latency_90" }

// Node_AuditSuccessCount_Field wraps a value for the "audit_success_count" column.
type Node_AuditSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_AuditSuccessCount wraps v as a set field value.
func Node_AuditSuccessCount(v int64) Node_AuditSuccessCount_Field {
	return Node_AuditSuccessCount_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_AuditSuccessCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_AuditSuccessCount_Field) _Column() string { return "audit_success_count" }

// Node_TotalAuditCount_Field wraps a value for the "total_audit_count" column.
type Node_TotalAuditCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_TotalAuditCount wraps v as a set field value.
func Node_TotalAuditCount(v int64) Node_TotalAuditCount_Field {
	return Node_TotalAuditCount_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_TotalAuditCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_TotalAuditCount_Field) _Column() string { return "total_audit_count" }
|
|
|
|
type Node_VettedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_VettedAt(v time.Time) Node_VettedAt_Field {
|
|
return Node_VettedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_VettedAt_Raw(v *time.Time) Node_VettedAt_Field {
|
|
if v == nil {
|
|
return Node_VettedAt_Null()
|
|
}
|
|
return Node_VettedAt(*v)
|
|
}
|
|
|
|
func Node_VettedAt_Null() Node_VettedAt_Field {
|
|
return Node_VettedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_VettedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_VettedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_VettedAt_Field) _Column() string { return "vetted_at" }
|
|
|
|
type Node_CreatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func Node_CreatedAt(v time.Time) Node_CreatedAt_Field {
|
|
return Node_CreatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_CreatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type Node_UpdatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func Node_UpdatedAt(v time.Time) Node_UpdatedAt_Field {
|
|
return Node_UpdatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_UpdatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
type Node_LastContactSuccess_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func Node_LastContactSuccess(v time.Time) Node_LastContactSuccess_Field {
|
|
return Node_LastContactSuccess_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_LastContactSuccess_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_LastContactSuccess_Field) _Column() string { return "last_contact_success" }
|
|
|
|
type Node_LastContactFailure_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func Node_LastContactFailure(v time.Time) Node_LastContactFailure_Field {
|
|
return Node_LastContactFailure_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_LastContactFailure_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_LastContactFailure_Field) _Column() string { return "last_contact_failure" }
|
|
|
|
type Node_Contained_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value bool
|
|
}
|
|
|
|
func Node_Contained(v bool) Node_Contained_Field {
|
|
return Node_Contained_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_Contained_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_Contained_Field) _Column() string { return "contained" }
|
|
|
|
type Node_Disqualified_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_Disqualified(v time.Time) Node_Disqualified_Field {
|
|
return Node_Disqualified_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_Disqualified_Raw(v *time.Time) Node_Disqualified_Field {
|
|
if v == nil {
|
|
return Node_Disqualified_Null()
|
|
}
|
|
return Node_Disqualified(*v)
|
|
}
|
|
|
|
func Node_Disqualified_Null() Node_Disqualified_Field {
|
|
return Node_Disqualified_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_Disqualified_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_Disqualified_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_Disqualified_Field) _Column() string { return "disqualified" }
|
|
|
|
type Node_Suspended_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_Suspended(v time.Time) Node_Suspended_Field {
|
|
return Node_Suspended_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_Suspended_Raw(v *time.Time) Node_Suspended_Field {
|
|
if v == nil {
|
|
return Node_Suspended_Null()
|
|
}
|
|
return Node_Suspended(*v)
|
|
}
|
|
|
|
func Node_Suspended_Null() Node_Suspended_Field {
|
|
return Node_Suspended_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_Suspended_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_Suspended_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_Suspended_Field) _Column() string { return "suspended" }
|
|
|
|
type Node_UnknownAuditSuspended_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_UnknownAuditSuspended(v time.Time) Node_UnknownAuditSuspended_Field {
|
|
return Node_UnknownAuditSuspended_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_UnknownAuditSuspended_Raw(v *time.Time) Node_UnknownAuditSuspended_Field {
|
|
if v == nil {
|
|
return Node_UnknownAuditSuspended_Null()
|
|
}
|
|
return Node_UnknownAuditSuspended(*v)
|
|
}
|
|
|
|
func Node_UnknownAuditSuspended_Null() Node_UnknownAuditSuspended_Field {
|
|
return Node_UnknownAuditSuspended_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_UnknownAuditSuspended_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_UnknownAuditSuspended_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_UnknownAuditSuspended_Field) _Column() string { return "unknown_audit_suspended" }
|
|
|
|
type Node_OfflineSuspended_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_OfflineSuspended(v time.Time) Node_OfflineSuspended_Field {
|
|
return Node_OfflineSuspended_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_OfflineSuspended_Raw(v *time.Time) Node_OfflineSuspended_Field {
|
|
if v == nil {
|
|
return Node_OfflineSuspended_Null()
|
|
}
|
|
return Node_OfflineSuspended(*v)
|
|
}
|
|
|
|
func Node_OfflineSuspended_Null() Node_OfflineSuspended_Field {
|
|
return Node_OfflineSuspended_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_OfflineSuspended_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_OfflineSuspended_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_OfflineSuspended_Field) _Column() string { return "offline_suspended" }
|
|
|
|
type Node_UnderReview_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_UnderReview(v time.Time) Node_UnderReview_Field {
|
|
return Node_UnderReview_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_UnderReview_Raw(v *time.Time) Node_UnderReview_Field {
|
|
if v == nil {
|
|
return Node_UnderReview_Null()
|
|
}
|
|
return Node_UnderReview(*v)
|
|
}
|
|
|
|
func Node_UnderReview_Null() Node_UnderReview_Field {
|
|
return Node_UnderReview_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_UnderReview_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_UnderReview_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_UnderReview_Field) _Column() string { return "under_review" }
|
|
|
|
// Node_OnlineScore_Field wraps a value for the "online_score" column.
type Node_OnlineScore_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_OnlineScore wraps v as a set field value.
func Node_OnlineScore(v float64) Node_OnlineScore_Field {
	return Node_OnlineScore_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_OnlineScore_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_OnlineScore_Field) _Column() string { return "online_score" }

// Node_AuditReputationAlpha_Field wraps a value for the "audit_reputation_alpha" column.
type Node_AuditReputationAlpha_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_AuditReputationAlpha wraps v as a set field value.
func Node_AuditReputationAlpha(v float64) Node_AuditReputationAlpha_Field {
	return Node_AuditReputationAlpha_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_AuditReputationAlpha_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_AuditReputationAlpha_Field) _Column() string { return "audit_reputation_alpha" }

// Node_AuditReputationBeta_Field wraps a value for the "audit_reputation_beta" column.
type Node_AuditReputationBeta_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_AuditReputationBeta wraps v as a set field value.
func Node_AuditReputationBeta(v float64) Node_AuditReputationBeta_Field {
	return Node_AuditReputationBeta_Field{_set: true, _value: v}
}

// value returns the wrapped value as interface{}, or nil when the field is unset.
func (f Node_AuditReputationBeta_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Node_AuditReputationBeta_Field) _Column() string { return "audit_reputation_beta" }
|
|
|
|
type Node_UnknownAuditReputationAlpha_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value float64
|
|
}
|
|
|
|
func Node_UnknownAuditReputationAlpha(v float64) Node_UnknownAuditReputationAlpha_Field {
|
|
return Node_UnknownAuditReputationAlpha_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_UnknownAuditReputationAlpha_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_UnknownAuditReputationAlpha_Field) _Column() string {
|
|
return "unknown_audit_reputation_alpha"
|
|
}
|
|
|
|
type Node_UnknownAuditReputationBeta_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value float64
|
|
}
|
|
|
|
func Node_UnknownAuditReputationBeta(v float64) Node_UnknownAuditReputationBeta_Field {
|
|
return Node_UnknownAuditReputationBeta_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_UnknownAuditReputationBeta_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_UnknownAuditReputationBeta_Field) _Column() string { return "unknown_audit_reputation_beta" }
|
|
|
|
type Node_ExitInitiatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_ExitInitiatedAt(v time.Time) Node_ExitInitiatedAt_Field {
|
|
return Node_ExitInitiatedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_ExitInitiatedAt_Raw(v *time.Time) Node_ExitInitiatedAt_Field {
|
|
if v == nil {
|
|
return Node_ExitInitiatedAt_Null()
|
|
}
|
|
return Node_ExitInitiatedAt(*v)
|
|
}
|
|
|
|
func Node_ExitInitiatedAt_Null() Node_ExitInitiatedAt_Field {
|
|
return Node_ExitInitiatedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_ExitInitiatedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_ExitInitiatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitInitiatedAt_Field) _Column() string { return "exit_initiated_at" }
|
|
|
|
type Node_ExitLoopCompletedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_ExitLoopCompletedAt(v time.Time) Node_ExitLoopCompletedAt_Field {
|
|
return Node_ExitLoopCompletedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_ExitLoopCompletedAt_Raw(v *time.Time) Node_ExitLoopCompletedAt_Field {
|
|
if v == nil {
|
|
return Node_ExitLoopCompletedAt_Null()
|
|
}
|
|
return Node_ExitLoopCompletedAt(*v)
|
|
}
|
|
|
|
func Node_ExitLoopCompletedAt_Null() Node_ExitLoopCompletedAt_Field {
|
|
return Node_ExitLoopCompletedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_ExitLoopCompletedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_ExitLoopCompletedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitLoopCompletedAt_Field) _Column() string { return "exit_loop_completed_at" }
|
|
|
|
type Node_ExitFinishedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value *time.Time
|
|
}
|
|
|
|
func Node_ExitFinishedAt(v time.Time) Node_ExitFinishedAt_Field {
|
|
return Node_ExitFinishedAt_Field{_set: true, _value: &v}
|
|
}
|
|
|
|
func Node_ExitFinishedAt_Raw(v *time.Time) Node_ExitFinishedAt_Field {
|
|
if v == nil {
|
|
return Node_ExitFinishedAt_Null()
|
|
}
|
|
return Node_ExitFinishedAt(*v)
|
|
}
|
|
|
|
func Node_ExitFinishedAt_Null() Node_ExitFinishedAt_Field {
|
|
return Node_ExitFinishedAt_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f Node_ExitFinishedAt_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f Node_ExitFinishedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitFinishedAt_Field) _Column() string { return "exit_finished_at" }
|
|
|
|
type Node_ExitSuccess_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value bool
|
|
}
|
|
|
|
func Node_ExitSuccess(v bool) Node_ExitSuccess_Field {
|
|
return Node_ExitSuccess_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f Node_ExitSuccess_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (Node_ExitSuccess_Field) _Column() string { return "exit_success" }
|
|
|
|
// NodeApiVersion is the model for one row of the "node_api_versions" table.
type NodeApiVersion struct {
	Id         []byte
	ApiVersion int
	CreatedAt  time.Time
	UpdatedAt  time.Time
}

// _Table names the backing SQL table.
func (NodeApiVersion) _Table() string { return "node_api_versions" }

// NodeApiVersion_Update_Fields lists the columns an update may change.
type NodeApiVersion_Update_Fields struct {
	ApiVersion NodeApiVersion_ApiVersion_Field
}

// NodeApiVersion_Id_Field wraps the "id" column value.
type NodeApiVersion_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// NodeApiVersion_Id builds a set field holding v.
func NodeApiVersion_Id(v []byte) NodeApiVersion_Id_Field {
	return NodeApiVersion_Id_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f NodeApiVersion_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (NodeApiVersion_Id_Field) _Column() string { return "id" }

// NodeApiVersion_ApiVersion_Field wraps the "api_version" column value.
type NodeApiVersion_ApiVersion_Field struct {
	_set   bool
	_null  bool
	_value int
}

// NodeApiVersion_ApiVersion builds a set field holding v.
func NodeApiVersion_ApiVersion(v int) NodeApiVersion_ApiVersion_Field {
	return NodeApiVersion_ApiVersion_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f NodeApiVersion_ApiVersion_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (NodeApiVersion_ApiVersion_Field) _Column() string { return "api_version" }

// NodeApiVersion_CreatedAt_Field wraps the "created_at" column value.
type NodeApiVersion_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// NodeApiVersion_CreatedAt builds a set field holding v.
func NodeApiVersion_CreatedAt(v time.Time) NodeApiVersion_CreatedAt_Field {
	return NodeApiVersion_CreatedAt_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f NodeApiVersion_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (NodeApiVersion_CreatedAt_Field) _Column() string { return "created_at" }

// NodeApiVersion_UpdatedAt_Field wraps the "updated_at" column value.
type NodeApiVersion_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// NodeApiVersion_UpdatedAt builds a set field holding v.
func NodeApiVersion_UpdatedAt(v time.Time) NodeApiVersion_UpdatedAt_Field {
	return NodeApiVersion_UpdatedAt_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f NodeApiVersion_UpdatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (NodeApiVersion_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// Offer models one row of the "offers" table. Pointer fields correspond to
// nullable columns. The *_Field helpers below follow the repeating pattern
// used throughout this generated file: Xxx(v) sets a value, Xxx_Raw/Xxx_Null
// handle NULL for nullable columns, isnull() reports NULL binding, value()
// yields the driver bind value (nil when unset/NULL), and _Column() names
// the SQL column.
type Offer struct {
	Id                        int
	Name                      string
	Description               string
	AwardCreditInCents        int
	InviteeCreditInCents      int
	AwardCreditDurationDays   *int
	InviteeCreditDurationDays *int
	RedeemableCap             *int
	ExpiresAt                 time.Time
	CreatedAt                 time.Time
	Status                    int
	Type                      int
}

func (Offer) _Table() string { return "offers" }

// Offer_Create_Fields holds the optional columns for inserting an Offer.
type Offer_Create_Fields struct {
	AwardCreditInCents        Offer_AwardCreditInCents_Field
	InviteeCreditInCents      Offer_InviteeCreditInCents_Field
	AwardCreditDurationDays   Offer_AwardCreditDurationDays_Field
	InviteeCreditDurationDays Offer_InviteeCreditDurationDays_Field
	RedeemableCap             Offer_RedeemableCap_Field
}

// Offer_Update_Fields holds the columns an Offer update may change.
type Offer_Update_Fields struct {
	Name                      Offer_Name_Field
	Description               Offer_Description_Field
	AwardCreditInCents        Offer_AwardCreditInCents_Field
	InviteeCreditInCents      Offer_InviteeCreditInCents_Field
	AwardCreditDurationDays   Offer_AwardCreditDurationDays_Field
	InviteeCreditDurationDays Offer_InviteeCreditDurationDays_Field
	RedeemableCap             Offer_RedeemableCap_Field
	ExpiresAt                 Offer_ExpiresAt_Field
	Status                    Offer_Status_Field
	Type                      Offer_Type_Field
}

// Offer_Id_Field wraps the "id" column.
type Offer_Id_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Offer_Id(v int) Offer_Id_Field {
	return Offer_Id_Field{_set: true, _value: v}
}

func (f Offer_Id_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_Id_Field) _Column() string { return "id" }

// Offer_Name_Field wraps the "name" column.
type Offer_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Offer_Name(v string) Offer_Name_Field {
	return Offer_Name_Field{_set: true, _value: v}
}

func (f Offer_Name_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_Name_Field) _Column() string { return "name" }

// Offer_Description_Field wraps the "description" column.
type Offer_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Offer_Description(v string) Offer_Description_Field {
	return Offer_Description_Field{_set: true, _value: v}
}

func (f Offer_Description_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_Description_Field) _Column() string { return "description" }

// Offer_AwardCreditInCents_Field wraps the "award_credit_in_cents" column.
type Offer_AwardCreditInCents_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Offer_AwardCreditInCents(v int) Offer_AwardCreditInCents_Field {
	return Offer_AwardCreditInCents_Field{_set: true, _value: v}
}

func (f Offer_AwardCreditInCents_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_AwardCreditInCents_Field) _Column() string { return "award_credit_in_cents" }

// Offer_InviteeCreditInCents_Field wraps the "invitee_credit_in_cents" column.
type Offer_InviteeCreditInCents_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Offer_InviteeCreditInCents(v int) Offer_InviteeCreditInCents_Field {
	return Offer_InviteeCreditInCents_Field{_set: true, _value: v}
}

func (f Offer_InviteeCreditInCents_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_InviteeCreditInCents_Field) _Column() string { return "invitee_credit_in_cents" }

// Offer_AwardCreditDurationDays_Field wraps the nullable
// "award_credit_duration_days" column.
type Offer_AwardCreditDurationDays_Field struct {
	_set   bool
	_null  bool
	_value *int
}

func Offer_AwardCreditDurationDays(v int) Offer_AwardCreditDurationDays_Field {
	return Offer_AwardCreditDurationDays_Field{_set: true, _value: &v}
}

// Offer_AwardCreditDurationDays_Raw maps a nil pointer to an explicit NULL.
func Offer_AwardCreditDurationDays_Raw(v *int) Offer_AwardCreditDurationDays_Field {
	if v == nil {
		return Offer_AwardCreditDurationDays_Null()
	}
	return Offer_AwardCreditDurationDays(*v)
}

func Offer_AwardCreditDurationDays_Null() Offer_AwardCreditDurationDays_Field {
	return Offer_AwardCreditDurationDays_Field{_set: true, _null: true}
}

func (f Offer_AwardCreditDurationDays_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

func (f Offer_AwardCreditDurationDays_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_AwardCreditDurationDays_Field) _Column() string { return "award_credit_duration_days" }

// Offer_InviteeCreditDurationDays_Field wraps the nullable
// "invitee_credit_duration_days" column.
type Offer_InviteeCreditDurationDays_Field struct {
	_set   bool
	_null  bool
	_value *int
}

func Offer_InviteeCreditDurationDays(v int) Offer_InviteeCreditDurationDays_Field {
	return Offer_InviteeCreditDurationDays_Field{_set: true, _value: &v}
}

// Offer_InviteeCreditDurationDays_Raw maps a nil pointer to an explicit NULL.
func Offer_InviteeCreditDurationDays_Raw(v *int) Offer_InviteeCreditDurationDays_Field {
	if v == nil {
		return Offer_InviteeCreditDurationDays_Null()
	}
	return Offer_InviteeCreditDurationDays(*v)
}

func Offer_InviteeCreditDurationDays_Null() Offer_InviteeCreditDurationDays_Field {
	return Offer_InviteeCreditDurationDays_Field{_set: true, _null: true}
}

func (f Offer_InviteeCreditDurationDays_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

func (f Offer_InviteeCreditDurationDays_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_InviteeCreditDurationDays_Field) _Column() string { return "invitee_credit_duration_days" }

// Offer_RedeemableCap_Field wraps the nullable "redeemable_cap" column.
type Offer_RedeemableCap_Field struct {
	_set   bool
	_null  bool
	_value *int
}

func Offer_RedeemableCap(v int) Offer_RedeemableCap_Field {
	return Offer_RedeemableCap_Field{_set: true, _value: &v}
}

// Offer_RedeemableCap_Raw maps a nil pointer to an explicit NULL.
func Offer_RedeemableCap_Raw(v *int) Offer_RedeemableCap_Field {
	if v == nil {
		return Offer_RedeemableCap_Null()
	}
	return Offer_RedeemableCap(*v)
}

func Offer_RedeemableCap_Null() Offer_RedeemableCap_Field {
	return Offer_RedeemableCap_Field{_set: true, _null: true}
}

func (f Offer_RedeemableCap_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f Offer_RedeemableCap_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_RedeemableCap_Field) _Column() string { return "redeemable_cap" }

// Offer_ExpiresAt_Field wraps the "expires_at" column.
type Offer_ExpiresAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Offer_ExpiresAt(v time.Time) Offer_ExpiresAt_Field {
	return Offer_ExpiresAt_Field{_set: true, _value: v}
}

func (f Offer_ExpiresAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_ExpiresAt_Field) _Column() string { return "expires_at" }

// Offer_CreatedAt_Field wraps the "created_at" column.
type Offer_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Offer_CreatedAt(v time.Time) Offer_CreatedAt_Field {
	return Offer_CreatedAt_Field{_set: true, _value: v}
}

func (f Offer_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_CreatedAt_Field) _Column() string { return "created_at" }

// Offer_Status_Field wraps the "status" column.
type Offer_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Offer_Status(v int) Offer_Status_Field {
	return Offer_Status_Field{_set: true, _value: v}
}

func (f Offer_Status_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_Status_Field) _Column() string { return "status" }

// Offer_Type_Field wraps the "type" column.
type Offer_Type_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Offer_Type(v int) Offer_Type_Field {
	return Offer_Type_Field{_set: true, _value: v}
}

func (f Offer_Type_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Offer_Type_Field) _Column() string { return "type" }
|
|
|
|
// PeerIdentity models one row of the "peer_identities" table.
type PeerIdentity struct {
	NodeId           []byte
	LeafSerialNumber []byte
	Chain            []byte
	UpdatedAt        time.Time
}

// _Table names the backing SQL table.
func (PeerIdentity) _Table() string { return "peer_identities" }

// PeerIdentity_Update_Fields lists the columns an update may change.
type PeerIdentity_Update_Fields struct {
	LeafSerialNumber PeerIdentity_LeafSerialNumber_Field
	Chain            PeerIdentity_Chain_Field
}

// PeerIdentity_NodeId_Field wraps the "node_id" column value.
type PeerIdentity_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PeerIdentity_NodeId builds a set field holding v.
func PeerIdentity_NodeId(v []byte) PeerIdentity_NodeId_Field {
	return PeerIdentity_NodeId_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PeerIdentity_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PeerIdentity_NodeId_Field) _Column() string { return "node_id" }

// PeerIdentity_LeafSerialNumber_Field wraps the "leaf_serial_number" column value.
type PeerIdentity_LeafSerialNumber_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PeerIdentity_LeafSerialNumber builds a set field holding v.
func PeerIdentity_LeafSerialNumber(v []byte) PeerIdentity_LeafSerialNumber_Field {
	return PeerIdentity_LeafSerialNumber_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PeerIdentity_LeafSerialNumber_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PeerIdentity_LeafSerialNumber_Field) _Column() string { return "leaf_serial_number" }

// PeerIdentity_Chain_Field wraps the "chain" column value.
type PeerIdentity_Chain_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PeerIdentity_Chain builds a set field holding v.
func PeerIdentity_Chain(v []byte) PeerIdentity_Chain_Field {
	return PeerIdentity_Chain_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PeerIdentity_Chain_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PeerIdentity_Chain_Field) _Column() string { return "chain" }

// PeerIdentity_UpdatedAt_Field wraps the "updated_at" column value.
type PeerIdentity_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// PeerIdentity_UpdatedAt builds a set field holding v.
func PeerIdentity_UpdatedAt(v time.Time) PeerIdentity_UpdatedAt_Field {
	return PeerIdentity_UpdatedAt_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PeerIdentity_UpdatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PeerIdentity_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// PendingAudits models one row of the "pending_audits" table.
type PendingAudits struct {
	NodeId            []byte
	PieceId           []byte
	StripeIndex       int64
	ShareSize         int64
	ExpectedShareHash []byte
	ReverifyCount     int64
	Path              []byte
}

// _Table names the backing SQL table.
func (PendingAudits) _Table() string { return "pending_audits" }

// PendingAudits_Update_Fields lists the columns an update may change.
type PendingAudits_Update_Fields struct {
	ReverifyCount PendingAudits_ReverifyCount_Field
}

// PendingAudits_NodeId_Field wraps the "node_id" column value.
type PendingAudits_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_NodeId builds a set field holding v.
func PendingAudits_NodeId(v []byte) PendingAudits_NodeId_Field {
	return PendingAudits_NodeId_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_NodeId_Field) _Column() string { return "node_id" }

// PendingAudits_PieceId_Field wraps the "piece_id" column value.
type PendingAudits_PieceId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_PieceId builds a set field holding v.
func PendingAudits_PieceId(v []byte) PendingAudits_PieceId_Field {
	return PendingAudits_PieceId_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_PieceId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_PieceId_Field) _Column() string { return "piece_id" }

// PendingAudits_StripeIndex_Field wraps the "stripe_index" column value.
type PendingAudits_StripeIndex_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// PendingAudits_StripeIndex builds a set field holding v.
func PendingAudits_StripeIndex(v int64) PendingAudits_StripeIndex_Field {
	return PendingAudits_StripeIndex_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_StripeIndex_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_StripeIndex_Field) _Column() string { return "stripe_index" }

// PendingAudits_ShareSize_Field wraps the "share_size" column value.
type PendingAudits_ShareSize_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// PendingAudits_ShareSize builds a set field holding v.
func PendingAudits_ShareSize(v int64) PendingAudits_ShareSize_Field {
	return PendingAudits_ShareSize_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_ShareSize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_ShareSize_Field) _Column() string { return "share_size" }

// PendingAudits_ExpectedShareHash_Field wraps the "expected_share_hash" column value.
type PendingAudits_ExpectedShareHash_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_ExpectedShareHash builds a set field holding v.
func PendingAudits_ExpectedShareHash(v []byte) PendingAudits_ExpectedShareHash_Field {
	return PendingAudits_ExpectedShareHash_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_ExpectedShareHash_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_ExpectedShareHash_Field) _Column() string { return "expected_share_hash" }

// PendingAudits_ReverifyCount_Field wraps the "reverify_count" column value.
type PendingAudits_ReverifyCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// PendingAudits_ReverifyCount builds a set field holding v.
func PendingAudits_ReverifyCount(v int64) PendingAudits_ReverifyCount_Field {
	return PendingAudits_ReverifyCount_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_ReverifyCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_ReverifyCount_Field) _Column() string { return "reverify_count" }

// PendingAudits_Path_Field wraps the "path" column value.
type PendingAudits_Path_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// PendingAudits_Path builds a set field holding v.
func PendingAudits_Path(v []byte) PendingAudits_Path_Field {
	return PendingAudits_Path_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f PendingAudits_Path_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (PendingAudits_Path_Field) _Column() string { return "path" }
|
|
|
|
// Project models one row of the "projects" table. Pointer fields map to
// nullable columns; PartnerId is nullable via a nil slice. The *_Field
// helpers follow the file-wide pattern: Xxx(v) sets a value, Xxx_Raw and
// Xxx_Null handle NULL for nullable columns, isnull() reports NULL binding,
// value() yields the driver bind value (nil when unset/NULL), and
// _Column() names the SQL column.
type Project struct {
	Id             []byte
	Name           string
	Description    string
	UsageLimit     *int64
	BandwidthLimit *int64
	RateLimit      *int
	MaxBuckets     *int
	PartnerId      []byte
	OwnerId        []byte
	CreatedAt      time.Time
}

func (Project) _Table() string { return "projects" }

// Project_Create_Fields holds the optional columns for inserting a Project.
type Project_Create_Fields struct {
	UsageLimit     Project_UsageLimit_Field
	BandwidthLimit Project_BandwidthLimit_Field
	RateLimit      Project_RateLimit_Field
	MaxBuckets     Project_MaxBuckets_Field
	PartnerId      Project_PartnerId_Field
}

// Project_Update_Fields holds the columns a Project update may change.
type Project_Update_Fields struct {
	Name           Project_Name_Field
	Description    Project_Description_Field
	UsageLimit     Project_UsageLimit_Field
	BandwidthLimit Project_BandwidthLimit_Field
	RateLimit      Project_RateLimit_Field
	MaxBuckets     Project_MaxBuckets_Field
}

// Project_Id_Field wraps the "id" column.
type Project_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Project_Id(v []byte) Project_Id_Field {
	return Project_Id_Field{_set: true, _value: v}
}

func (f Project_Id_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_Id_Field) _Column() string { return "id" }

// Project_Name_Field wraps the "name" column.
type Project_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Project_Name(v string) Project_Name_Field {
	return Project_Name_Field{_set: true, _value: v}
}

func (f Project_Name_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_Name_Field) _Column() string { return "name" }

// Project_Description_Field wraps the "description" column.
type Project_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Project_Description(v string) Project_Description_Field {
	return Project_Description_Field{_set: true, _value: v}
}

func (f Project_Description_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_Description_Field) _Column() string { return "description" }

// Project_UsageLimit_Field wraps the nullable "usage_limit" column.
type Project_UsageLimit_Field struct {
	_set   bool
	_null  bool
	_value *int64
}

func Project_UsageLimit(v int64) Project_UsageLimit_Field {
	return Project_UsageLimit_Field{_set: true, _value: &v}
}

// Project_UsageLimit_Raw maps a nil pointer to an explicit NULL.
func Project_UsageLimit_Raw(v *int64) Project_UsageLimit_Field {
	if v == nil {
		return Project_UsageLimit_Null()
	}
	return Project_UsageLimit(*v)
}

func Project_UsageLimit_Null() Project_UsageLimit_Field {
	return Project_UsageLimit_Field{_set: true, _null: true}
}

func (f Project_UsageLimit_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f Project_UsageLimit_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_UsageLimit_Field) _Column() string { return "usage_limit" }

// Project_BandwidthLimit_Field wraps the nullable "bandwidth_limit" column.
type Project_BandwidthLimit_Field struct {
	_set   bool
	_null  bool
	_value *int64
}

func Project_BandwidthLimit(v int64) Project_BandwidthLimit_Field {
	return Project_BandwidthLimit_Field{_set: true, _value: &v}
}

// Project_BandwidthLimit_Raw maps a nil pointer to an explicit NULL.
func Project_BandwidthLimit_Raw(v *int64) Project_BandwidthLimit_Field {
	if v == nil {
		return Project_BandwidthLimit_Null()
	}
	return Project_BandwidthLimit(*v)
}

func Project_BandwidthLimit_Null() Project_BandwidthLimit_Field {
	return Project_BandwidthLimit_Field{_set: true, _null: true}
}

func (f Project_BandwidthLimit_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f Project_BandwidthLimit_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_BandwidthLimit_Field) _Column() string { return "bandwidth_limit" }

// Project_RateLimit_Field wraps the nullable "rate_limit" column.
type Project_RateLimit_Field struct {
	_set   bool
	_null  bool
	_value *int
}

func Project_RateLimit(v int) Project_RateLimit_Field {
	return Project_RateLimit_Field{_set: true, _value: &v}
}

// Project_RateLimit_Raw maps a nil pointer to an explicit NULL.
func Project_RateLimit_Raw(v *int) Project_RateLimit_Field {
	if v == nil {
		return Project_RateLimit_Null()
	}
	return Project_RateLimit(*v)
}

func Project_RateLimit_Null() Project_RateLimit_Field {
	return Project_RateLimit_Field{_set: true, _null: true}
}

func (f Project_RateLimit_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f Project_RateLimit_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_RateLimit_Field) _Column() string { return "rate_limit" }

// Project_MaxBuckets_Field wraps the nullable "max_buckets" column.
type Project_MaxBuckets_Field struct {
	_set   bool
	_null  bool
	_value *int
}

func Project_MaxBuckets(v int) Project_MaxBuckets_Field {
	return Project_MaxBuckets_Field{_set: true, _value: &v}
}

// Project_MaxBuckets_Raw maps a nil pointer to an explicit NULL.
func Project_MaxBuckets_Raw(v *int) Project_MaxBuckets_Field {
	if v == nil {
		return Project_MaxBuckets_Null()
	}
	return Project_MaxBuckets(*v)
}

func Project_MaxBuckets_Null() Project_MaxBuckets_Field {
	return Project_MaxBuckets_Field{_set: true, _null: true}
}

func (f Project_MaxBuckets_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f Project_MaxBuckets_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_MaxBuckets_Field) _Column() string { return "max_buckets" }

// Project_PartnerId_Field wraps the nullable "partner_id" column; a nil
// slice passed to Project_PartnerId_Raw becomes an explicit NULL.
type Project_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Project_PartnerId(v []byte) Project_PartnerId_Field {
	return Project_PartnerId_Field{_set: true, _value: v}
}

func Project_PartnerId_Raw(v []byte) Project_PartnerId_Field {
	if v == nil {
		return Project_PartnerId_Null()
	}
	return Project_PartnerId(v)
}

func Project_PartnerId_Null() Project_PartnerId_Field {
	return Project_PartnerId_Field{_set: true, _null: true}
}

func (f Project_PartnerId_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f Project_PartnerId_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_PartnerId_Field) _Column() string { return "partner_id" }

// Project_OwnerId_Field wraps the "owner_id" column.
type Project_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Project_OwnerId(v []byte) Project_OwnerId_Field {
	return Project_OwnerId_Field{_set: true, _value: v}
}

func (f Project_OwnerId_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_OwnerId_Field) _Column() string { return "owner_id" }

// Project_CreatedAt_Field wraps the "created_at" column.
type Project_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Project_CreatedAt(v time.Time) Project_CreatedAt_Field {
	return Project_CreatedAt_Field{_set: true, _value: v}
}

func (f Project_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Project_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type ProjectBandwidthRollup struct {
|
|
ProjectId []byte
|
|
IntervalMonth time.Time
|
|
EgressAllocated uint64
|
|
}
|
|
|
|
func (ProjectBandwidthRollup) _Table() string { return "project_bandwidth_rollups" }
|
|
|
|
type ProjectBandwidthRollup_Update_Fields struct {
|
|
EgressAllocated ProjectBandwidthRollup_EgressAllocated_Field
|
|
}
|
|
|
|
type ProjectBandwidthRollup_ProjectId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func ProjectBandwidthRollup_ProjectId(v []byte) ProjectBandwidthRollup_ProjectId_Field {
|
|
return ProjectBandwidthRollup_ProjectId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f ProjectBandwidthRollup_ProjectId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (ProjectBandwidthRollup_ProjectId_Field) _Column() string { return "project_id" }
|
|
|
|
type ProjectBandwidthRollup_IntervalMonth_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func ProjectBandwidthRollup_IntervalMonth(v time.Time) ProjectBandwidthRollup_IntervalMonth_Field {
|
|
v = toDate(v)
|
|
return ProjectBandwidthRollup_IntervalMonth_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f ProjectBandwidthRollup_IntervalMonth_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (ProjectBandwidthRollup_IntervalMonth_Field) _Column() string { return "interval_month" }
|
|
|
|
type ProjectBandwidthRollup_EgressAllocated_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func ProjectBandwidthRollup_EgressAllocated(v uint64) ProjectBandwidthRollup_EgressAllocated_Field {
|
|
return ProjectBandwidthRollup_EgressAllocated_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f ProjectBandwidthRollup_EgressAllocated_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (ProjectBandwidthRollup_EgressAllocated_Field) _Column() string { return "egress_allocated" }
|
|
|
|
// RegistrationToken models one row of the "registration_tokens" table.
type RegistrationToken struct {
	Secret       []byte
	OwnerId      []byte
	ProjectLimit int
	CreatedAt    time.Time
}

// _Table names the backing SQL table.
func (RegistrationToken) _Table() string { return "registration_tokens" }

// RegistrationToken_Create_Fields lists the optional columns for an insert.
type RegistrationToken_Create_Fields struct {
	OwnerId RegistrationToken_OwnerId_Field
}

// RegistrationToken_Update_Fields lists the columns an update may change.
type RegistrationToken_Update_Fields struct {
	OwnerId RegistrationToken_OwnerId_Field
}

// RegistrationToken_Secret_Field wraps the "secret" column value.
type RegistrationToken_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// RegistrationToken_Secret builds a set field holding v.
func RegistrationToken_Secret(v []byte) RegistrationToken_Secret_Field {
	return RegistrationToken_Secret_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f RegistrationToken_Secret_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_Secret_Field) _Column() string { return "secret" }

// RegistrationToken_OwnerId_Field wraps the nullable "owner_id" column value.
type RegistrationToken_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// RegistrationToken_OwnerId builds a set field holding v.
func RegistrationToken_OwnerId(v []byte) RegistrationToken_OwnerId_Field {
	return RegistrationToken_OwnerId_Field{_value: v, _set: true}
}

// RegistrationToken_OwnerId_Raw maps a nil slice to an explicit NULL.
func RegistrationToken_OwnerId_Raw(v []byte) RegistrationToken_OwnerId_Field {
	if v != nil {
		return RegistrationToken_OwnerId(v)
	}
	return RegistrationToken_OwnerId_Null()
}

// RegistrationToken_OwnerId_Null builds an explicit SQL NULL field.
func RegistrationToken_OwnerId_Null() RegistrationToken_OwnerId_Field {
	return RegistrationToken_OwnerId_Field{_null: true, _set: true}
}

// isnull reports whether the field binds as SQL NULL.
func (f RegistrationToken_OwnerId_Field) isnull() bool {
	return !(f._set && !f._null && f._value != nil)
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f RegistrationToken_OwnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_OwnerId_Field) _Column() string { return "owner_id" }

// RegistrationToken_ProjectLimit_Field wraps the "project_limit" column value.
type RegistrationToken_ProjectLimit_Field struct {
	_set   bool
	_null  bool
	_value int
}

// RegistrationToken_ProjectLimit builds a set field holding v.
func RegistrationToken_ProjectLimit(v int) RegistrationToken_ProjectLimit_Field {
	return RegistrationToken_ProjectLimit_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f RegistrationToken_ProjectLimit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_ProjectLimit_Field) _Column() string { return "project_limit" }

// RegistrationToken_CreatedAt_Field wraps the "created_at" column value.
type RegistrationToken_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// RegistrationToken_CreatedAt builds a set field holding v.
func RegistrationToken_CreatedAt(v time.Time) RegistrationToken_CreatedAt_Field {
	return RegistrationToken_CreatedAt_Field{_value: v, _set: true}
}

// value reports the driver bind value; nil stands for unset or NULL.
func (f RegistrationToken_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// ResetPasswordToken mirrors one row of the reset_password_tokens table.
type ResetPasswordToken struct {
	Secret    []byte
	OwnerId   []byte
	CreatedAt time.Time
}

// _Table reports the database table this model maps to.
func (ResetPasswordToken) _Table() string {
	return "reset_password_tokens"
}

// ResetPasswordToken_Update_Fields holds the columns that an update of a
// reset_password_tokens row is allowed to change.
type ResetPasswordToken_Update_Fields struct {
	OwnerId ResetPasswordToken_OwnerId_Field
}

// ResetPasswordToken_Secret_Field carries a value for the "secret" column
// together with set/null bookkeeping.
type ResetPasswordToken_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ResetPasswordToken_Secret builds a set, non-null field holding v.
func ResetPasswordToken_Secret(v []byte) ResetPasswordToken_Secret_Field {
	return ResetPasswordToken_Secret_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f ResetPasswordToken_Secret_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (ResetPasswordToken_Secret_Field) _Column() string {
	return "secret"
}

// ResetPasswordToken_OwnerId_Field carries a value for the "owner_id"
// column together with set/null bookkeeping.
type ResetPasswordToken_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ResetPasswordToken_OwnerId builds a set, non-null field holding v.
func ResetPasswordToken_OwnerId(v []byte) ResetPasswordToken_OwnerId_Field {
	return ResetPasswordToken_OwnerId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f ResetPasswordToken_OwnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (ResetPasswordToken_OwnerId_Field) _Column() string {
	return "owner_id"
}

// ResetPasswordToken_CreatedAt_Field carries a value for the "created_at"
// column together with set/null bookkeeping.
type ResetPasswordToken_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ResetPasswordToken_CreatedAt builds a set, non-null field holding v.
func ResetPasswordToken_CreatedAt(v time.Time) ResetPasswordToken_CreatedAt_Field {
	return ResetPasswordToken_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f ResetPasswordToken_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (ResetPasswordToken_CreatedAt_Field) _Column() string {
	return "created_at"
}
|
|
|
|
// Revocation mirrors one row of the revocations table.
type Revocation struct {
	Revoked  []byte
	ApiKeyId []byte
}

// _Table reports the database table this model maps to.
func (Revocation) _Table() string {
	return "revocations"
}

// Revocation_Update_Fields is empty: no revocations column is updatable.
type Revocation_Update_Fields struct {
}

// Revocation_Revoked_Field carries a value for the "revoked" column
// together with set/null bookkeeping.
type Revocation_Revoked_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Revocation_Revoked builds a set, non-null field holding v.
func Revocation_Revoked(v []byte) Revocation_Revoked_Field {
	return Revocation_Revoked_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f Revocation_Revoked_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (Revocation_Revoked_Field) _Column() string {
	return "revoked"
}

// Revocation_ApiKeyId_Field carries a value for the "api_key_id" column
// together with set/null bookkeeping.
type Revocation_ApiKeyId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Revocation_ApiKeyId builds a set, non-null field holding v.
func Revocation_ApiKeyId(v []byte) Revocation_ApiKeyId_Field {
	return Revocation_ApiKeyId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f Revocation_ApiKeyId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (Revocation_ApiKeyId_Field) _Column() string {
	return "api_key_id"
}
|
|
|
|
// StoragenodeBandwidthRollup mirrors one row of the
// storagenode_bandwidth_rollups table.
type StoragenodeBandwidthRollup struct {
	StoragenodeId   []byte
	IntervalStart   time.Time
	IntervalSeconds uint
	Action          uint
	Allocated       *uint64
	Settled         uint64
}

// _Table reports the database table this model maps to.
func (StoragenodeBandwidthRollup) _Table() string {
	return "storagenode_bandwidth_rollups"
}

// StoragenodeBandwidthRollup_Create_Fields holds the nullable columns that
// may optionally be supplied when creating a row.
type StoragenodeBandwidthRollup_Create_Fields struct {
	Allocated StoragenodeBandwidthRollup_Allocated_Field
}

// StoragenodeBandwidthRollup_Update_Fields holds the columns that an update
// is allowed to change.
type StoragenodeBandwidthRollup_Update_Fields struct {
	Allocated StoragenodeBandwidthRollup_Allocated_Field
	Settled   StoragenodeBandwidthRollup_Settled_Field
}

// StoragenodeBandwidthRollup_StoragenodeId_Field carries a value for the
// "storagenode_id" column together with set/null bookkeeping.
type StoragenodeBandwidthRollup_StoragenodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// StoragenodeBandwidthRollup_StoragenodeId builds a set, non-null field holding v.
func StoragenodeBandwidthRollup_StoragenodeId(v []byte) StoragenodeBandwidthRollup_StoragenodeId_Field {
	return StoragenodeBandwidthRollup_StoragenodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollup_StoragenodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollup_StoragenodeId_Field) _Column() string {
	return "storagenode_id"
}

// StoragenodeBandwidthRollup_IntervalStart_Field carries a value for the
// "interval_start" column together with set/null bookkeeping.
type StoragenodeBandwidthRollup_IntervalStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// StoragenodeBandwidthRollup_IntervalStart builds a set, non-null field holding v.
func StoragenodeBandwidthRollup_IntervalStart(v time.Time) StoragenodeBandwidthRollup_IntervalStart_Field {
	return StoragenodeBandwidthRollup_IntervalStart_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollup_IntervalStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollup_IntervalStart_Field) _Column() string {
	return "interval_start"
}

// StoragenodeBandwidthRollup_IntervalSeconds_Field carries a value for the
// "interval_seconds" column together with set/null bookkeeping.
type StoragenodeBandwidthRollup_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// StoragenodeBandwidthRollup_IntervalSeconds builds a set, non-null field holding v.
func StoragenodeBandwidthRollup_IntervalSeconds(v uint) StoragenodeBandwidthRollup_IntervalSeconds_Field {
	return StoragenodeBandwidthRollup_IntervalSeconds_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollup_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollup_IntervalSeconds_Field) _Column() string {
	return "interval_seconds"
}

// StoragenodeBandwidthRollup_Action_Field carries a value for the "action"
// column together with set/null bookkeeping.
type StoragenodeBandwidthRollup_Action_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// StoragenodeBandwidthRollup_Action builds a set, non-null field holding v.
func StoragenodeBandwidthRollup_Action(v uint) StoragenodeBandwidthRollup_Action_Field {
	return StoragenodeBandwidthRollup_Action_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollup_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollup_Action_Field) _Column() string {
	return "action"
}

// StoragenodeBandwidthRollup_Allocated_Field carries a value for the
// nullable "allocated" column together with set/null bookkeeping.
type StoragenodeBandwidthRollup_Allocated_Field struct {
	_set   bool
	_null  bool
	_value *uint64
}

// StoragenodeBandwidthRollup_Allocated builds a set, non-null field holding v.
func StoragenodeBandwidthRollup_Allocated(v uint64) StoragenodeBandwidthRollup_Allocated_Field {
	return StoragenodeBandwidthRollup_Allocated_Field{_set: true, _value: &v}
}

// StoragenodeBandwidthRollup_Allocated_Raw builds a field from a
// possibly-nil pointer, mapping nil to SQL NULL.
func StoragenodeBandwidthRollup_Allocated_Raw(v *uint64) StoragenodeBandwidthRollup_Allocated_Field {
	if v != nil {
		return StoragenodeBandwidthRollup_Allocated(*v)
	}
	return StoragenodeBandwidthRollup_Allocated_Null()
}

// StoragenodeBandwidthRollup_Allocated_Null builds a field that is explicitly NULL.
func StoragenodeBandwidthRollup_Allocated_Null() StoragenodeBandwidthRollup_Allocated_Field {
	return StoragenodeBandwidthRollup_Allocated_Field{_set: true, _null: true}
}

// isnull reports whether the field would render as SQL NULL.
func (f StoragenodeBandwidthRollup_Allocated_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollup_Allocated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollup_Allocated_Field) _Column() string {
	return "allocated"
}

// StoragenodeBandwidthRollup_Settled_Field carries a value for the
// "settled" column together with set/null bookkeeping.
type StoragenodeBandwidthRollup_Settled_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// StoragenodeBandwidthRollup_Settled builds a set, non-null field holding v.
func StoragenodeBandwidthRollup_Settled(v uint64) StoragenodeBandwidthRollup_Settled_Field {
	return StoragenodeBandwidthRollup_Settled_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollup_Settled_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollup_Settled_Field) _Column() string {
	return "settled"
}
|
|
|
|
// StoragenodeBandwidthRollupArchive mirrors one row of the
// storagenode_bandwidth_rollup_archives table.
type StoragenodeBandwidthRollupArchive struct {
	StoragenodeId   []byte
	IntervalStart   time.Time
	IntervalSeconds uint
	Action          uint
	Allocated       *uint64
	Settled         uint64
}

// _Table reports the database table this model maps to.
func (StoragenodeBandwidthRollupArchive) _Table() string {
	return "storagenode_bandwidth_rollup_archives"
}

// StoragenodeBandwidthRollupArchive_Create_Fields holds the nullable columns
// that may optionally be supplied when creating a row.
type StoragenodeBandwidthRollupArchive_Create_Fields struct {
	Allocated StoragenodeBandwidthRollupArchive_Allocated_Field
}

// StoragenodeBandwidthRollupArchive_Update_Fields holds the columns that an
// update is allowed to change.
type StoragenodeBandwidthRollupArchive_Update_Fields struct {
	Allocated StoragenodeBandwidthRollupArchive_Allocated_Field
	Settled   StoragenodeBandwidthRollupArchive_Settled_Field
}

// StoragenodeBandwidthRollupArchive_StoragenodeId_Field carries a value for
// the "storagenode_id" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupArchive_StoragenodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// StoragenodeBandwidthRollupArchive_StoragenodeId builds a set, non-null field holding v.
func StoragenodeBandwidthRollupArchive_StoragenodeId(v []byte) StoragenodeBandwidthRollupArchive_StoragenodeId_Field {
	return StoragenodeBandwidthRollupArchive_StoragenodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupArchive_StoragenodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupArchive_StoragenodeId_Field) _Column() string {
	return "storagenode_id"
}

// StoragenodeBandwidthRollupArchive_IntervalStart_Field carries a value for
// the "interval_start" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupArchive_IntervalStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// StoragenodeBandwidthRollupArchive_IntervalStart builds a set, non-null field holding v.
func StoragenodeBandwidthRollupArchive_IntervalStart(v time.Time) StoragenodeBandwidthRollupArchive_IntervalStart_Field {
	return StoragenodeBandwidthRollupArchive_IntervalStart_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupArchive_IntervalStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupArchive_IntervalStart_Field) _Column() string {
	return "interval_start"
}

// StoragenodeBandwidthRollupArchive_IntervalSeconds_Field carries a value
// for the "interval_seconds" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupArchive_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// StoragenodeBandwidthRollupArchive_IntervalSeconds builds a set, non-null field holding v.
func StoragenodeBandwidthRollupArchive_IntervalSeconds(v uint) StoragenodeBandwidthRollupArchive_IntervalSeconds_Field {
	return StoragenodeBandwidthRollupArchive_IntervalSeconds_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupArchive_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupArchive_IntervalSeconds_Field) _Column() string {
	return "interval_seconds"
}

// StoragenodeBandwidthRollupArchive_Action_Field carries a value for the
// "action" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupArchive_Action_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// StoragenodeBandwidthRollupArchive_Action builds a set, non-null field holding v.
func StoragenodeBandwidthRollupArchive_Action(v uint) StoragenodeBandwidthRollupArchive_Action_Field {
	return StoragenodeBandwidthRollupArchive_Action_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupArchive_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupArchive_Action_Field) _Column() string {
	return "action"
}

// StoragenodeBandwidthRollupArchive_Allocated_Field carries a value for the
// nullable "allocated" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupArchive_Allocated_Field struct {
	_set   bool
	_null  bool
	_value *uint64
}

// StoragenodeBandwidthRollupArchive_Allocated builds a set, non-null field holding v.
func StoragenodeBandwidthRollupArchive_Allocated(v uint64) StoragenodeBandwidthRollupArchive_Allocated_Field {
	return StoragenodeBandwidthRollupArchive_Allocated_Field{_set: true, _value: &v}
}

// StoragenodeBandwidthRollupArchive_Allocated_Raw builds a field from a
// possibly-nil pointer, mapping nil to SQL NULL.
func StoragenodeBandwidthRollupArchive_Allocated_Raw(v *uint64) StoragenodeBandwidthRollupArchive_Allocated_Field {
	if v != nil {
		return StoragenodeBandwidthRollupArchive_Allocated(*v)
	}
	return StoragenodeBandwidthRollupArchive_Allocated_Null()
}

// StoragenodeBandwidthRollupArchive_Allocated_Null builds a field that is explicitly NULL.
func StoragenodeBandwidthRollupArchive_Allocated_Null() StoragenodeBandwidthRollupArchive_Allocated_Field {
	return StoragenodeBandwidthRollupArchive_Allocated_Field{_set: true, _null: true}
}

// isnull reports whether the field would render as SQL NULL.
func (f StoragenodeBandwidthRollupArchive_Allocated_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupArchive_Allocated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupArchive_Allocated_Field) _Column() string {
	return "allocated"
}

// StoragenodeBandwidthRollupArchive_Settled_Field carries a value for the
// "settled" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupArchive_Settled_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// StoragenodeBandwidthRollupArchive_Settled builds a set, non-null field holding v.
func StoragenodeBandwidthRollupArchive_Settled(v uint64) StoragenodeBandwidthRollupArchive_Settled_Field {
	return StoragenodeBandwidthRollupArchive_Settled_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupArchive_Settled_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupArchive_Settled_Field) _Column() string {
	return "settled"
}
|
|
|
|
// StoragenodeBandwidthRollupPhase2 mirrors one row of the
// storagenode_bandwidth_rollups_phase2 table.
type StoragenodeBandwidthRollupPhase2 struct {
	StoragenodeId   []byte
	IntervalStart   time.Time
	IntervalSeconds uint
	Action          uint
	Allocated       *uint64
	Settled         uint64
}

// _Table reports the database table this model maps to.
func (StoragenodeBandwidthRollupPhase2) _Table() string {
	return "storagenode_bandwidth_rollups_phase2"
}

// StoragenodeBandwidthRollupPhase2_Create_Fields holds the nullable columns
// that may optionally be supplied when creating a row.
type StoragenodeBandwidthRollupPhase2_Create_Fields struct {
	Allocated StoragenodeBandwidthRollupPhase2_Allocated_Field
}

// StoragenodeBandwidthRollupPhase2_Update_Fields holds the columns that an
// update is allowed to change.
type StoragenodeBandwidthRollupPhase2_Update_Fields struct {
	Allocated StoragenodeBandwidthRollupPhase2_Allocated_Field
	Settled   StoragenodeBandwidthRollupPhase2_Settled_Field
}

// StoragenodeBandwidthRollupPhase2_StoragenodeId_Field carries a value for
// the "storagenode_id" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupPhase2_StoragenodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// StoragenodeBandwidthRollupPhase2_StoragenodeId builds a set, non-null field holding v.
func StoragenodeBandwidthRollupPhase2_StoragenodeId(v []byte) StoragenodeBandwidthRollupPhase2_StoragenodeId_Field {
	return StoragenodeBandwidthRollupPhase2_StoragenodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupPhase2_StoragenodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupPhase2_StoragenodeId_Field) _Column() string {
	return "storagenode_id"
}

// StoragenodeBandwidthRollupPhase2_IntervalStart_Field carries a value for
// the "interval_start" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupPhase2_IntervalStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// StoragenodeBandwidthRollupPhase2_IntervalStart builds a set, non-null field holding v.
func StoragenodeBandwidthRollupPhase2_IntervalStart(v time.Time) StoragenodeBandwidthRollupPhase2_IntervalStart_Field {
	return StoragenodeBandwidthRollupPhase2_IntervalStart_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupPhase2_IntervalStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupPhase2_IntervalStart_Field) _Column() string {
	return "interval_start"
}

// StoragenodeBandwidthRollupPhase2_IntervalSeconds_Field carries a value for
// the "interval_seconds" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupPhase2_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// StoragenodeBandwidthRollupPhase2_IntervalSeconds builds a set, non-null field holding v.
func StoragenodeBandwidthRollupPhase2_IntervalSeconds(v uint) StoragenodeBandwidthRollupPhase2_IntervalSeconds_Field {
	return StoragenodeBandwidthRollupPhase2_IntervalSeconds_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupPhase2_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupPhase2_IntervalSeconds_Field) _Column() string {
	return "interval_seconds"
}

// StoragenodeBandwidthRollupPhase2_Action_Field carries a value for the
// "action" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupPhase2_Action_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// StoragenodeBandwidthRollupPhase2_Action builds a set, non-null field holding v.
func StoragenodeBandwidthRollupPhase2_Action(v uint) StoragenodeBandwidthRollupPhase2_Action_Field {
	return StoragenodeBandwidthRollupPhase2_Action_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupPhase2_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupPhase2_Action_Field) _Column() string {
	return "action"
}

// StoragenodeBandwidthRollupPhase2_Allocated_Field carries a value for the
// nullable "allocated" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupPhase2_Allocated_Field struct {
	_set   bool
	_null  bool
	_value *uint64
}

// StoragenodeBandwidthRollupPhase2_Allocated builds a set, non-null field holding v.
func StoragenodeBandwidthRollupPhase2_Allocated(v uint64) StoragenodeBandwidthRollupPhase2_Allocated_Field {
	return StoragenodeBandwidthRollupPhase2_Allocated_Field{_set: true, _value: &v}
}

// StoragenodeBandwidthRollupPhase2_Allocated_Raw builds a field from a
// possibly-nil pointer, mapping nil to SQL NULL.
func StoragenodeBandwidthRollupPhase2_Allocated_Raw(v *uint64) StoragenodeBandwidthRollupPhase2_Allocated_Field {
	if v != nil {
		return StoragenodeBandwidthRollupPhase2_Allocated(*v)
	}
	return StoragenodeBandwidthRollupPhase2_Allocated_Null()
}

// StoragenodeBandwidthRollupPhase2_Allocated_Null builds a field that is explicitly NULL.
func StoragenodeBandwidthRollupPhase2_Allocated_Null() StoragenodeBandwidthRollupPhase2_Allocated_Field {
	return StoragenodeBandwidthRollupPhase2_Allocated_Field{_set: true, _null: true}
}

// isnull reports whether the field would render as SQL NULL.
func (f StoragenodeBandwidthRollupPhase2_Allocated_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupPhase2_Allocated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupPhase2_Allocated_Field) _Column() string {
	return "allocated"
}

// StoragenodeBandwidthRollupPhase2_Settled_Field carries a value for the
// "settled" column together with set/null bookkeeping.
type StoragenodeBandwidthRollupPhase2_Settled_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// StoragenodeBandwidthRollupPhase2_Settled builds a set, non-null field holding v.
func StoragenodeBandwidthRollupPhase2_Settled(v uint64) StoragenodeBandwidthRollupPhase2_Settled_Field {
	return StoragenodeBandwidthRollupPhase2_Settled_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodeBandwidthRollupPhase2_Settled_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodeBandwidthRollupPhase2_Settled_Field) _Column() string {
	return "settled"
}
|
|
|
|
// StoragenodePayment mirrors one row of the storagenode_payments table.
type StoragenodePayment struct {
	Id        int64
	CreatedAt time.Time
	NodeId    []byte
	Period    string
	Amount    int64
	Receipt   *string
	Notes     *string
}

// _Table reports the database table this model maps to.
func (StoragenodePayment) _Table() string {
	return "storagenode_payments"
}

// StoragenodePayment_Create_Fields holds the nullable columns that may
// optionally be supplied when creating a row.
type StoragenodePayment_Create_Fields struct {
	Receipt StoragenodePayment_Receipt_Field
	Notes   StoragenodePayment_Notes_Field
}

// StoragenodePayment_Update_Fields is empty: no storagenode_payments column
// is updatable.
type StoragenodePayment_Update_Fields struct {
}

// StoragenodePayment_Id_Field carries a value for the "id" column together
// with set/null bookkeeping.
type StoragenodePayment_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePayment_Id builds a set, non-null field holding v.
func StoragenodePayment_Id(v int64) StoragenodePayment_Id_Field {
	return StoragenodePayment_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_Id_Field) _Column() string {
	return "id"
}

// StoragenodePayment_CreatedAt_Field carries a value for the "created_at"
// column together with set/null bookkeeping.
type StoragenodePayment_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// StoragenodePayment_CreatedAt builds a set, non-null field holding v.
func StoragenodePayment_CreatedAt(v time.Time) StoragenodePayment_CreatedAt_Field {
	return StoragenodePayment_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_CreatedAt_Field) _Column() string {
	return "created_at"
}

// StoragenodePayment_NodeId_Field carries a value for the "node_id" column
// together with set/null bookkeeping.
type StoragenodePayment_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// StoragenodePayment_NodeId builds a set, non-null field holding v.
func StoragenodePayment_NodeId(v []byte) StoragenodePayment_NodeId_Field {
	return StoragenodePayment_NodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_NodeId_Field) _Column() string {
	return "node_id"
}

// StoragenodePayment_Period_Field carries a value for the "period" column
// together with set/null bookkeeping.
type StoragenodePayment_Period_Field struct {
	_set   bool
	_null  bool
	_value string
}

// StoragenodePayment_Period builds a set, non-null field holding v.
func StoragenodePayment_Period(v string) StoragenodePayment_Period_Field {
	return StoragenodePayment_Period_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_Period_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_Period_Field) _Column() string {
	return "period"
}

// StoragenodePayment_Amount_Field carries a value for the "amount" column
// together with set/null bookkeeping.
type StoragenodePayment_Amount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePayment_Amount builds a set, non-null field holding v.
func StoragenodePayment_Amount(v int64) StoragenodePayment_Amount_Field {
	return StoragenodePayment_Amount_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_Amount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_Amount_Field) _Column() string {
	return "amount"
}

// StoragenodePayment_Receipt_Field carries a value for the nullable
// "receipt" column together with set/null bookkeeping.
type StoragenodePayment_Receipt_Field struct {
	_set   bool
	_null  bool
	_value *string
}

// StoragenodePayment_Receipt builds a set, non-null field holding v.
func StoragenodePayment_Receipt(v string) StoragenodePayment_Receipt_Field {
	return StoragenodePayment_Receipt_Field{_set: true, _value: &v}
}

// StoragenodePayment_Receipt_Raw builds a field from a possibly-nil
// pointer, mapping nil to SQL NULL.
func StoragenodePayment_Receipt_Raw(v *string) StoragenodePayment_Receipt_Field {
	if v != nil {
		return StoragenodePayment_Receipt(*v)
	}
	return StoragenodePayment_Receipt_Null()
}

// StoragenodePayment_Receipt_Null builds a field that is explicitly NULL.
func StoragenodePayment_Receipt_Null() StoragenodePayment_Receipt_Field {
	return StoragenodePayment_Receipt_Field{_set: true, _null: true}
}

// isnull reports whether the field would render as SQL NULL.
func (f StoragenodePayment_Receipt_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_Receipt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_Receipt_Field) _Column() string {
	return "receipt"
}

// StoragenodePayment_Notes_Field carries a value for the nullable "notes"
// column together with set/null bookkeeping.
type StoragenodePayment_Notes_Field struct {
	_set   bool
	_null  bool
	_value *string
}

// StoragenodePayment_Notes builds a set, non-null field holding v.
func StoragenodePayment_Notes(v string) StoragenodePayment_Notes_Field {
	return StoragenodePayment_Notes_Field{_set: true, _value: &v}
}

// StoragenodePayment_Notes_Raw builds a field from a possibly-nil pointer,
// mapping nil to SQL NULL.
func StoragenodePayment_Notes_Raw(v *string) StoragenodePayment_Notes_Field {
	if v != nil {
		return StoragenodePayment_Notes(*v)
	}
	return StoragenodePayment_Notes_Null()
}

// StoragenodePayment_Notes_Null builds a field that is explicitly NULL.
func StoragenodePayment_Notes_Null() StoragenodePayment_Notes_Field {
	return StoragenodePayment_Notes_Field{_set: true, _null: true}
}

// isnull reports whether the field would render as SQL NULL.
func (f StoragenodePayment_Notes_Field) isnull() bool {
	return f._value == nil || f._null || !f._set
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePayment_Notes_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePayment_Notes_Field) _Column() string {
	return "notes"
}
|
|
|
|
// StoragenodePaystub mirrors one row of the storagenode_paystubs table.
type StoragenodePaystub struct {
	Period         string
	NodeId         []byte
	CreatedAt      time.Time
	Codes          string
	UsageAtRest    float64
	UsageGet       int64
	UsagePut       int64
	UsageGetRepair int64
	UsagePutRepair int64
	UsageGetAudit  int64
	CompAtRest     int64
	CompGet        int64
	CompPut        int64
	CompGetRepair  int64
	CompPutRepair  int64
	CompGetAudit   int64
	SurgePercent   int64
	Held           int64
	Owed           int64
	Disposed       int64
	Paid           int64
	Distributed    int64
}

// _Table reports the database table this model maps to.
func (StoragenodePaystub) _Table() string {
	return "storagenode_paystubs"
}

// StoragenodePaystub_Update_Fields is empty: no storagenode_paystubs column
// is updatable.
type StoragenodePaystub_Update_Fields struct {
}

// StoragenodePaystub_Period_Field carries a value for the "period" column
// together with set/null bookkeeping.
type StoragenodePaystub_Period_Field struct {
	_set   bool
	_null  bool
	_value string
}

// StoragenodePaystub_Period builds a set, non-null field holding v.
func StoragenodePaystub_Period(v string) StoragenodePaystub_Period_Field {
	return StoragenodePaystub_Period_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_Period_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_Period_Field) _Column() string {
	return "period"
}

// StoragenodePaystub_NodeId_Field carries a value for the "node_id" column
// together with set/null bookkeeping.
type StoragenodePaystub_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// StoragenodePaystub_NodeId builds a set, non-null field holding v.
func StoragenodePaystub_NodeId(v []byte) StoragenodePaystub_NodeId_Field {
	return StoragenodePaystub_NodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_NodeId_Field) _Column() string {
	return "node_id"
}

// StoragenodePaystub_CreatedAt_Field carries a value for the "created_at"
// column together with set/null bookkeeping.
type StoragenodePaystub_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// StoragenodePaystub_CreatedAt builds a set, non-null field holding v.
func StoragenodePaystub_CreatedAt(v time.Time) StoragenodePaystub_CreatedAt_Field {
	return StoragenodePaystub_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CreatedAt_Field) _Column() string {
	return "created_at"
}

// StoragenodePaystub_Codes_Field carries a value for the "codes" column
// together with set/null bookkeeping.
type StoragenodePaystub_Codes_Field struct {
	_set   bool
	_null  bool
	_value string
}

// StoragenodePaystub_Codes builds a set, non-null field holding v.
func StoragenodePaystub_Codes(v string) StoragenodePaystub_Codes_Field {
	return StoragenodePaystub_Codes_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_Codes_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_Codes_Field) _Column() string {
	return "codes"
}

// StoragenodePaystub_UsageAtRest_Field carries a value for the
// "usage_at_rest" column together with set/null bookkeeping.
type StoragenodePaystub_UsageAtRest_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// StoragenodePaystub_UsageAtRest builds a set, non-null field holding v.
func StoragenodePaystub_UsageAtRest(v float64) StoragenodePaystub_UsageAtRest_Field {
	return StoragenodePaystub_UsageAtRest_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_UsageAtRest_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_UsageAtRest_Field) _Column() string {
	return "usage_at_rest"
}

// StoragenodePaystub_UsageGet_Field carries a value for the "usage_get"
// column together with set/null bookkeeping.
type StoragenodePaystub_UsageGet_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_UsageGet builds a set, non-null field holding v.
func StoragenodePaystub_UsageGet(v int64) StoragenodePaystub_UsageGet_Field {
	return StoragenodePaystub_UsageGet_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_UsageGet_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_UsageGet_Field) _Column() string {
	return "usage_get"
}

// StoragenodePaystub_UsagePut_Field carries a value for the "usage_put"
// column together with set/null bookkeeping.
type StoragenodePaystub_UsagePut_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_UsagePut builds a set, non-null field holding v.
func StoragenodePaystub_UsagePut(v int64) StoragenodePaystub_UsagePut_Field {
	return StoragenodePaystub_UsagePut_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_UsagePut_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_UsagePut_Field) _Column() string {
	return "usage_put"
}

// StoragenodePaystub_UsageGetRepair_Field carries a value for the
// "usage_get_repair" column together with set/null bookkeeping.
type StoragenodePaystub_UsageGetRepair_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_UsageGetRepair builds a set, non-null field holding v.
func StoragenodePaystub_UsageGetRepair(v int64) StoragenodePaystub_UsageGetRepair_Field {
	return StoragenodePaystub_UsageGetRepair_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_UsageGetRepair_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_UsageGetRepair_Field) _Column() string {
	return "usage_get_repair"
}

// StoragenodePaystub_UsagePutRepair_Field carries a value for the
// "usage_put_repair" column together with set/null bookkeeping.
type StoragenodePaystub_UsagePutRepair_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_UsagePutRepair builds a set, non-null field holding v.
func StoragenodePaystub_UsagePutRepair(v int64) StoragenodePaystub_UsagePutRepair_Field {
	return StoragenodePaystub_UsagePutRepair_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_UsagePutRepair_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_UsagePutRepair_Field) _Column() string {
	return "usage_put_repair"
}

// StoragenodePaystub_UsageGetAudit_Field carries a value for the
// "usage_get_audit" column together with set/null bookkeeping.
type StoragenodePaystub_UsageGetAudit_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_UsageGetAudit builds a set, non-null field holding v.
func StoragenodePaystub_UsageGetAudit(v int64) StoragenodePaystub_UsageGetAudit_Field {
	return StoragenodePaystub_UsageGetAudit_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_UsageGetAudit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_UsageGetAudit_Field) _Column() string {
	return "usage_get_audit"
}

// StoragenodePaystub_CompAtRest_Field carries a value for the
// "comp_at_rest" column together with set/null bookkeeping.
type StoragenodePaystub_CompAtRest_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_CompAtRest builds a set, non-null field holding v.
func StoragenodePaystub_CompAtRest(v int64) StoragenodePaystub_CompAtRest_Field {
	return StoragenodePaystub_CompAtRest_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CompAtRest_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CompAtRest_Field) _Column() string {
	return "comp_at_rest"
}

// StoragenodePaystub_CompGet_Field carries a value for the "comp_get"
// column together with set/null bookkeeping.
type StoragenodePaystub_CompGet_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_CompGet builds a set, non-null field holding v.
func StoragenodePaystub_CompGet(v int64) StoragenodePaystub_CompGet_Field {
	return StoragenodePaystub_CompGet_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CompGet_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CompGet_Field) _Column() string {
	return "comp_get"
}

// StoragenodePaystub_CompPut_Field carries a value for the "comp_put"
// column together with set/null bookkeeping.
type StoragenodePaystub_CompPut_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_CompPut builds a set, non-null field holding v.
func StoragenodePaystub_CompPut(v int64) StoragenodePaystub_CompPut_Field {
	return StoragenodePaystub_CompPut_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CompPut_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CompPut_Field) _Column() string {
	return "comp_put"
}

// StoragenodePaystub_CompGetRepair_Field carries a value for the
// "comp_get_repair" column together with set/null bookkeeping.
type StoragenodePaystub_CompGetRepair_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_CompGetRepair builds a set, non-null field holding v.
func StoragenodePaystub_CompGetRepair(v int64) StoragenodePaystub_CompGetRepair_Field {
	return StoragenodePaystub_CompGetRepair_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CompGetRepair_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CompGetRepair_Field) _Column() string {
	return "comp_get_repair"
}

// StoragenodePaystub_CompPutRepair_Field carries a value for the
// "comp_put_repair" column together with set/null bookkeeping.
type StoragenodePaystub_CompPutRepair_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_CompPutRepair builds a set, non-null field holding v.
func StoragenodePaystub_CompPutRepair(v int64) StoragenodePaystub_CompPutRepair_Field {
	return StoragenodePaystub_CompPutRepair_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CompPutRepair_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CompPutRepair_Field) _Column() string {
	return "comp_put_repair"
}

// StoragenodePaystub_CompGetAudit_Field carries a value for the
// "comp_get_audit" column together with set/null bookkeeping.
type StoragenodePaystub_CompGetAudit_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_CompGetAudit builds a set, non-null field holding v.
func StoragenodePaystub_CompGetAudit(v int64) StoragenodePaystub_CompGetAudit_Field {
	return StoragenodePaystub_CompGetAudit_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_CompGetAudit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_CompGetAudit_Field) _Column() string {
	return "comp_get_audit"
}

// StoragenodePaystub_SurgePercent_Field carries a value for the
// "surge_percent" column together with set/null bookkeeping.
type StoragenodePaystub_SurgePercent_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_SurgePercent builds a set, non-null field holding v.
func StoragenodePaystub_SurgePercent(v int64) StoragenodePaystub_SurgePercent_Field {
	return StoragenodePaystub_SurgePercent_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_SurgePercent_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_SurgePercent_Field) _Column() string {
	return "surge_percent"
}

// StoragenodePaystub_Held_Field carries a value for the "held" column
// together with set/null bookkeeping.
type StoragenodePaystub_Held_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// StoragenodePaystub_Held builds a set, non-null field holding v.
func StoragenodePaystub_Held(v int64) StoragenodePaystub_Held_Field {
	return StoragenodePaystub_Held_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field, or nil when unset/null.
func (f StoragenodePaystub_Held_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column backing this field.
func (StoragenodePaystub_Held_Field) _Column() string {
	return "held"
}

// StoragenodePaystub_Owed_Field carries a value for the "owed" column
// together with set/null bookkeeping.
type StoragenodePaystub_Owed_Field struct {
	_set   bool
	_null  bool
	_value int64
}
|
|
|
|
func StoragenodePaystub_Owed(v int64) StoragenodePaystub_Owed_Field {
|
|
return StoragenodePaystub_Owed_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodePaystub_Owed_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodePaystub_Owed_Field) _Column() string { return "owed" }
|
|
|
|
type StoragenodePaystub_Disposed_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func StoragenodePaystub_Disposed(v int64) StoragenodePaystub_Disposed_Field {
|
|
return StoragenodePaystub_Disposed_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodePaystub_Disposed_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodePaystub_Disposed_Field) _Column() string { return "disposed" }
|
|
|
|
type StoragenodePaystub_Paid_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func StoragenodePaystub_Paid(v int64) StoragenodePaystub_Paid_Field {
|
|
return StoragenodePaystub_Paid_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodePaystub_Paid_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodePaystub_Paid_Field) _Column() string { return "paid" }
|
|
|
|
type StoragenodePaystub_Distributed_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int64
|
|
}
|
|
|
|
func StoragenodePaystub_Distributed(v int64) StoragenodePaystub_Distributed_Field {
|
|
return StoragenodePaystub_Distributed_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodePaystub_Distributed_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodePaystub_Distributed_Field) _Column() string { return "distributed" }
|
|
|
|
// StoragenodeStorageTally is one row of the "storagenode_storage_tallies"
// table: the measured stored-data total for a node at an interval end.
type StoragenodeStorageTally struct {
	NodeId          []byte
	IntervalEndTime time.Time
	DataTotal       float64
}

func (StoragenodeStorageTally) _Table() string { return "storagenode_storage_tallies" }

// StoragenodeStorageTally_Update_Fields is empty: no column of this table
// is updatable through dbx.
type StoragenodeStorageTally_Update_Fields struct {
}

// StoragenodeStorageTally_NodeId_Field wraps the "node_id" column ([]byte).
type StoragenodeStorageTally_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StoragenodeStorageTally_NodeId(v []byte) StoragenodeStorageTally_NodeId_Field {
	return StoragenodeStorageTally_NodeId_Field{_set: true, _value: v}
}

func (f StoragenodeStorageTally_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StoragenodeStorageTally_NodeId_Field) _Column() string { return "node_id" }

// StoragenodeStorageTally_IntervalEndTime_Field wraps the
// "interval_end_time" column (time.Time).
type StoragenodeStorageTally_IntervalEndTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StoragenodeStorageTally_IntervalEndTime(v time.Time) StoragenodeStorageTally_IntervalEndTime_Field {
	return StoragenodeStorageTally_IntervalEndTime_Field{_set: true, _value: v}
}

func (f StoragenodeStorageTally_IntervalEndTime_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StoragenodeStorageTally_IntervalEndTime_Field) _Column() string { return "interval_end_time" }

// StoragenodeStorageTally_DataTotal_Field wraps the "data_total" column (float64).
type StoragenodeStorageTally_DataTotal_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func StoragenodeStorageTally_DataTotal(v float64) StoragenodeStorageTally_DataTotal_Field {
	return StoragenodeStorageTally_DataTotal_Field{_set: true, _value: v}
}

func (f StoragenodeStorageTally_DataTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StoragenodeStorageTally_DataTotal_Field) _Column() string { return "data_total" }
|
|
|
|
// StripeCustomer is one row of the "stripe_customers" table, mapping a
// user id to its Stripe customer id.
type StripeCustomer struct {
	UserId     []byte
	CustomerId string
	CreatedAt  time.Time
}

func (StripeCustomer) _Table() string { return "stripe_customers" }

// StripeCustomer_Update_Fields is empty: no column of this table is
// updatable through dbx.
type StripeCustomer_Update_Fields struct {
}

// StripeCustomer_UserId_Field wraps the "user_id" column ([]byte).
type StripeCustomer_UserId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripeCustomer_UserId(v []byte) StripeCustomer_UserId_Field {
	return StripeCustomer_UserId_Field{_set: true, _value: v}
}

func (f StripeCustomer_UserId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripeCustomer_UserId_Field) _Column() string { return "user_id" }

// StripeCustomer_CustomerId_Field wraps the "customer_id" column (string).
type StripeCustomer_CustomerId_Field struct {
	_set   bool
	_null  bool
	_value string
}

func StripeCustomer_CustomerId(v string) StripeCustomer_CustomerId_Field {
	return StripeCustomer_CustomerId_Field{_set: true, _value: v}
}

func (f StripeCustomer_CustomerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripeCustomer_CustomerId_Field) _Column() string { return "customer_id" }

// StripeCustomer_CreatedAt_Field wraps the "created_at" column (time.Time).
type StripeCustomer_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripeCustomer_CreatedAt(v time.Time) StripeCustomer_CreatedAt_Field {
	return StripeCustomer_CreatedAt_Field{_set: true, _value: v}
}

func (f StripeCustomer_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripeCustomer_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// StripecoinpaymentsInvoiceProjectRecord is one row of the
// "stripecoinpayments_invoice_project_records" table: per-project usage
// totals for a billing period, with an invoicing state.
type StripecoinpaymentsInvoiceProjectRecord struct {
	Id          []byte
	ProjectId   []byte
	Storage     float64
	Egress      int64
	Objects     int64
	PeriodStart time.Time
	PeriodEnd   time.Time
	State       int
	CreatedAt   time.Time
}

func (StripecoinpaymentsInvoiceProjectRecord) _Table() string {
	return "stripecoinpayments_invoice_project_records"
}

// StripecoinpaymentsInvoiceProjectRecord_Update_Fields lists the columns
// that may be updated; only State is updatable.
type StripecoinpaymentsInvoiceProjectRecord_Update_Fields struct {
	State StripecoinpaymentsInvoiceProjectRecord_State_Field
}

// StripecoinpaymentsInvoiceProjectRecord_Id_Field wraps the "id" column ([]byte).
type StripecoinpaymentsInvoiceProjectRecord_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripecoinpaymentsInvoiceProjectRecord_Id(v []byte) StripecoinpaymentsInvoiceProjectRecord_Id_Field {
	return StripecoinpaymentsInvoiceProjectRecord_Id_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_Id_Field) _Column() string { return "id" }

// StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field wraps the
// "project_id" column ([]byte).
type StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripecoinpaymentsInvoiceProjectRecord_ProjectId(v []byte) StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field {
	return StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field) _Column() string { return "project_id" }

// StripecoinpaymentsInvoiceProjectRecord_Storage_Field wraps the
// "storage" column (float64).
type StripecoinpaymentsInvoiceProjectRecord_Storage_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func StripecoinpaymentsInvoiceProjectRecord_Storage(v float64) StripecoinpaymentsInvoiceProjectRecord_Storage_Field {
	return StripecoinpaymentsInvoiceProjectRecord_Storage_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Storage_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_Storage_Field) _Column() string { return "storage" }

// StripecoinpaymentsInvoiceProjectRecord_Egress_Field wraps the
// "egress" column (int64).
type StripecoinpaymentsInvoiceProjectRecord_Egress_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func StripecoinpaymentsInvoiceProjectRecord_Egress(v int64) StripecoinpaymentsInvoiceProjectRecord_Egress_Field {
	return StripecoinpaymentsInvoiceProjectRecord_Egress_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Egress_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_Egress_Field) _Column() string { return "egress" }

// StripecoinpaymentsInvoiceProjectRecord_Objects_Field wraps the
// "objects" column (int64).
type StripecoinpaymentsInvoiceProjectRecord_Objects_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func StripecoinpaymentsInvoiceProjectRecord_Objects(v int64) StripecoinpaymentsInvoiceProjectRecord_Objects_Field {
	return StripecoinpaymentsInvoiceProjectRecord_Objects_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_Objects_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_Objects_Field) _Column() string { return "objects" }

// StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field wraps the
// "period_start" column (time.Time).
type StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsInvoiceProjectRecord_PeriodStart(v time.Time) StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field {
	return StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field) _Column() string {
	return "period_start"
}

// StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field wraps the
// "period_end" column (time.Time).
type StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsInvoiceProjectRecord_PeriodEnd(v time.Time) StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field {
	return StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) _Column() string { return "period_end" }

// StripecoinpaymentsInvoiceProjectRecord_State_Field wraps the
// "state" column (int).
type StripecoinpaymentsInvoiceProjectRecord_State_Field struct {
	_set   bool
	_null  bool
	_value int
}

func StripecoinpaymentsInvoiceProjectRecord_State(v int) StripecoinpaymentsInvoiceProjectRecord_State_Field {
	return StripecoinpaymentsInvoiceProjectRecord_State_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_State_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_State_Field) _Column() string { return "state" }

// StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field wraps the
// "created_at" column (time.Time).
type StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsInvoiceProjectRecord_CreatedAt(v time.Time) StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field {
	return StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsInvoiceProjectRecord_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// StripecoinpaymentsTxConversionRate is one row of the
// "stripecoinpayments_tx_conversion_rates" table: the conversion rate
// recorded for a coin-payments transaction.
type StripecoinpaymentsTxConversionRate struct {
	TxId      string
	Rate      []byte
	CreatedAt time.Time
}

func (StripecoinpaymentsTxConversionRate) _Table() string {
	return "stripecoinpayments_tx_conversion_rates"
}

// StripecoinpaymentsTxConversionRate_Update_Fields is empty: no column of
// this table is updatable through dbx.
type StripecoinpaymentsTxConversionRate_Update_Fields struct {
}

// StripecoinpaymentsTxConversionRate_TxId_Field wraps the "tx_id" column (string).
type StripecoinpaymentsTxConversionRate_TxId_Field struct {
	_set   bool
	_null  bool
	_value string
}

func StripecoinpaymentsTxConversionRate_TxId(v string) StripecoinpaymentsTxConversionRate_TxId_Field {
	return StripecoinpaymentsTxConversionRate_TxId_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsTxConversionRate_TxId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsTxConversionRate_TxId_Field) _Column() string { return "tx_id" }

// StripecoinpaymentsTxConversionRate_Rate_Field wraps the "rate" column ([]byte).
type StripecoinpaymentsTxConversionRate_Rate_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func StripecoinpaymentsTxConversionRate_Rate(v []byte) StripecoinpaymentsTxConversionRate_Rate_Field {
	return StripecoinpaymentsTxConversionRate_Rate_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsTxConversionRate_Rate_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsTxConversionRate_Rate_Field) _Column() string { return "rate" }

// StripecoinpaymentsTxConversionRate_CreatedAt_Field wraps the
// "created_at" column (time.Time).
type StripecoinpaymentsTxConversionRate_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func StripecoinpaymentsTxConversionRate_CreatedAt(v time.Time) StripecoinpaymentsTxConversionRate_CreatedAt_Field {
	return StripecoinpaymentsTxConversionRate_CreatedAt_Field{_set: true, _value: v}
}

func (f StripecoinpaymentsTxConversionRate_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (StripecoinpaymentsTxConversionRate_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// User is one row of the "users" table. Pointer-typed fields
// (ShortName, Position, CompanyName, CompanySize, WorkingOn,
// EmployeeCount) and PartnerId map nullable columns.
type User struct {
	Id              []byte
	Email           string
	NormalizedEmail string
	FullName        string
	ShortName       *string
	PasswordHash    []byte
	Status          int
	PartnerId       []byte
	CreatedAt       time.Time
	ProjectLimit    int
	Position        *string
	CompanyName     *string
	CompanySize     *int
	WorkingOn       *string
	IsProfessional  bool
	EmployeeCount   *string
}

func (User) _Table() string { return "users" }

// User_Create_Fields lists the optional columns that may be supplied at
// insert time.
type User_Create_Fields struct {
	ShortName      User_ShortName_Field
	PartnerId      User_PartnerId_Field
	ProjectLimit   User_ProjectLimit_Field
	Position       User_Position_Field
	CompanyName    User_CompanyName_Field
	CompanySize    User_CompanySize_Field
	WorkingOn      User_WorkingOn_Field
	IsProfessional User_IsProfessional_Field
	EmployeeCount  User_EmployeeCount_Field
}

// User_Update_Fields lists the columns that may be updated.
type User_Update_Fields struct {
	Email           User_Email_Field
	NormalizedEmail User_NormalizedEmail_Field
	FullName        User_FullName_Field
	ShortName       User_ShortName_Field
	PasswordHash    User_PasswordHash_Field
	Status          User_Status_Field
	ProjectLimit    User_ProjectLimit_Field
	Position        User_Position_Field
	CompanyName     User_CompanyName_Field
	CompanySize     User_CompanySize_Field
	WorkingOn       User_WorkingOn_Field
	IsProfessional  User_IsProfessional_Field
	EmployeeCount   User_EmployeeCount_Field
}

// User_Id_Field wraps the "id" column ([]byte).
type User_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func User_Id(v []byte) User_Id_Field {
	return User_Id_Field{_set: true, _value: v}
}

func (f User_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Id_Field) _Column() string { return "id" }

// User_Email_Field wraps the "email" column (string).
type User_Email_Field struct {
	_set   bool
	_null  bool
	_value string
}

func User_Email(v string) User_Email_Field {
	return User_Email_Field{_set: true, _value: v}
}

func (f User_Email_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Email_Field) _Column() string { return "email" }

// User_NormalizedEmail_Field wraps the "normalized_email" column (string).
type User_NormalizedEmail_Field struct {
	_set   bool
	_null  bool
	_value string
}

func User_NormalizedEmail(v string) User_NormalizedEmail_Field {
	return User_NormalizedEmail_Field{_set: true, _value: v}
}

func (f User_NormalizedEmail_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_NormalizedEmail_Field) _Column() string { return "normalized_email" }

// User_FullName_Field wraps the "full_name" column (string).
type User_FullName_Field struct {
	_set   bool
	_null  bool
	_value string
}

func User_FullName(v string) User_FullName_Field {
	return User_FullName_Field{_set: true, _value: v}
}

func (f User_FullName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_FullName_Field) _Column() string { return "full_name" }

// User_ShortName_Field wraps the nullable "short_name" column (*string).
type User_ShortName_Field struct {
	_set   bool
	_null  bool
	_value *string
}

func User_ShortName(v string) User_ShortName_Field {
	return User_ShortName_Field{_set: true, _value: &v}
}

// User_ShortName_Raw maps a nil pointer to an explicit NULL.
func User_ShortName_Raw(v *string) User_ShortName_Field {
	if v != nil {
		return User_ShortName(*v)
	}
	return User_ShortName_Null()
}

// User_ShortName_Null produces an explicit NULL for the column.
func User_ShortName_Null() User_ShortName_Field {
	return User_ShortName_Field{_set: true, _null: true}
}

func (f User_ShortName_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_ShortName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_ShortName_Field) _Column() string { return "short_name" }

// User_PasswordHash_Field wraps the "password_hash" column ([]byte).
type User_PasswordHash_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func User_PasswordHash(v []byte) User_PasswordHash_Field {
	return User_PasswordHash_Field{_set: true, _value: v}
}

func (f User_PasswordHash_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_PasswordHash_Field) _Column() string { return "password_hash" }

// User_Status_Field wraps the "status" column (int).
type User_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

func User_Status(v int) User_Status_Field {
	return User_Status_Field{_set: true, _value: v}
}

func (f User_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Status_Field) _Column() string { return "status" }

// User_PartnerId_Field wraps the nullable "partner_id" column ([]byte).
type User_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func User_PartnerId(v []byte) User_PartnerId_Field {
	return User_PartnerId_Field{_set: true, _value: v}
}

// User_PartnerId_Raw maps a nil slice to an explicit NULL.
func User_PartnerId_Raw(v []byte) User_PartnerId_Field {
	if v != nil {
		return User_PartnerId(v)
	}
	return User_PartnerId_Null()
}

// User_PartnerId_Null produces an explicit NULL for the column.
func User_PartnerId_Null() User_PartnerId_Field {
	return User_PartnerId_Field{_set: true, _null: true}
}

func (f User_PartnerId_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_PartnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_PartnerId_Field) _Column() string { return "partner_id" }

// User_CreatedAt_Field wraps the "created_at" column (time.Time).
type User_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func User_CreatedAt(v time.Time) User_CreatedAt_Field {
	return User_CreatedAt_Field{_set: true, _value: v}
}

func (f User_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_CreatedAt_Field) _Column() string { return "created_at" }

// User_ProjectLimit_Field wraps the "project_limit" column (int).
type User_ProjectLimit_Field struct {
	_set   bool
	_null  bool
	_value int
}

func User_ProjectLimit(v int) User_ProjectLimit_Field {
	return User_ProjectLimit_Field{_set: true, _value: v}
}

func (f User_ProjectLimit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_ProjectLimit_Field) _Column() string { return "project_limit" }

// User_Position_Field wraps the nullable "position" column (*string).
type User_Position_Field struct {
	_set   bool
	_null  bool
	_value *string
}

func User_Position(v string) User_Position_Field {
	return User_Position_Field{_set: true, _value: &v}
}

// User_Position_Raw maps a nil pointer to an explicit NULL.
func User_Position_Raw(v *string) User_Position_Field {
	if v != nil {
		return User_Position(*v)
	}
	return User_Position_Null()
}

// User_Position_Null produces an explicit NULL for the column.
func User_Position_Null() User_Position_Field {
	return User_Position_Field{_set: true, _null: true}
}

func (f User_Position_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_Position_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Position_Field) _Column() string { return "position" }

// User_CompanyName_Field wraps the nullable "company_name" column (*string).
type User_CompanyName_Field struct {
	_set   bool
	_null  bool
	_value *string
}

func User_CompanyName(v string) User_CompanyName_Field {
	return User_CompanyName_Field{_set: true, _value: &v}
}

// User_CompanyName_Raw maps a nil pointer to an explicit NULL.
func User_CompanyName_Raw(v *string) User_CompanyName_Field {
	if v != nil {
		return User_CompanyName(*v)
	}
	return User_CompanyName_Null()
}

// User_CompanyName_Null produces an explicit NULL for the column.
func User_CompanyName_Null() User_CompanyName_Field {
	return User_CompanyName_Field{_set: true, _null: true}
}

func (f User_CompanyName_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_CompanyName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_CompanyName_Field) _Column() string { return "company_name" }

// User_CompanySize_Field wraps the nullable "company_size" column (*int).
type User_CompanySize_Field struct {
	_set   bool
	_null  bool
	_value *int
}

func User_CompanySize(v int) User_CompanySize_Field {
	return User_CompanySize_Field{_set: true, _value: &v}
}

// User_CompanySize_Raw maps a nil pointer to an explicit NULL.
func User_CompanySize_Raw(v *int) User_CompanySize_Field {
	if v != nil {
		return User_CompanySize(*v)
	}
	return User_CompanySize_Null()
}

// User_CompanySize_Null produces an explicit NULL for the column.
func User_CompanySize_Null() User_CompanySize_Field {
	return User_CompanySize_Field{_set: true, _null: true}
}

func (f User_CompanySize_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_CompanySize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_CompanySize_Field) _Column() string { return "company_size" }

// User_WorkingOn_Field wraps the nullable "working_on" column (*string).
type User_WorkingOn_Field struct {
	_set   bool
	_null  bool
	_value *string
}

func User_WorkingOn(v string) User_WorkingOn_Field {
	return User_WorkingOn_Field{_set: true, _value: &v}
}

// User_WorkingOn_Raw maps a nil pointer to an explicit NULL.
func User_WorkingOn_Raw(v *string) User_WorkingOn_Field {
	if v != nil {
		return User_WorkingOn(*v)
	}
	return User_WorkingOn_Null()
}

// User_WorkingOn_Null produces an explicit NULL for the column.
func User_WorkingOn_Null() User_WorkingOn_Field {
	return User_WorkingOn_Field{_set: true, _null: true}
}

func (f User_WorkingOn_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_WorkingOn_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_WorkingOn_Field) _Column() string { return "working_on" }

// User_IsProfessional_Field wraps the "is_professional" column (bool).
type User_IsProfessional_Field struct {
	_set   bool
	_null  bool
	_value bool
}

func User_IsProfessional(v bool) User_IsProfessional_Field {
	return User_IsProfessional_Field{_set: true, _value: v}
}

func (f User_IsProfessional_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_IsProfessional_Field) _Column() string { return "is_professional" }

// User_EmployeeCount_Field wraps the nullable "employee_count" column (*string).
type User_EmployeeCount_Field struct {
	_set   bool
	_null  bool
	_value *string
}

func User_EmployeeCount(v string) User_EmployeeCount_Field {
	return User_EmployeeCount_Field{_set: true, _value: &v}
}

// User_EmployeeCount_Raw maps a nil pointer to an explicit NULL.
func User_EmployeeCount_Raw(v *string) User_EmployeeCount_Field {
	if v != nil {
		return User_EmployeeCount(*v)
	}
	return User_EmployeeCount_Null()
}

// User_EmployeeCount_Null produces an explicit NULL for the column.
func User_EmployeeCount_Null() User_EmployeeCount_Field {
	return User_EmployeeCount_Field{_set: true, _null: true}
}

func (f User_EmployeeCount_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f User_EmployeeCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_EmployeeCount_Field) _Column() string { return "employee_count" }
|
|
|
|
// ValueAttribution is one row of the "value_attributions" table,
// attributing a project/bucket pair to a partner.
type ValueAttribution struct {
	ProjectId   []byte
	BucketName  []byte
	PartnerId   []byte
	LastUpdated time.Time
}

func (ValueAttribution) _Table() string { return "value_attributions" }

// ValueAttribution_Update_Fields is empty: no column of this table is
// updatable through dbx.
type ValueAttribution_Update_Fields struct {
}

// ValueAttribution_ProjectId_Field wraps the "project_id" column ([]byte).
type ValueAttribution_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ValueAttribution_ProjectId(v []byte) ValueAttribution_ProjectId_Field {
	return ValueAttribution_ProjectId_Field{_set: true, _value: v}
}

func (f ValueAttribution_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ValueAttribution_ProjectId_Field) _Column() string { return "project_id" }

// ValueAttribution_BucketName_Field wraps the "bucket_name" column ([]byte).
type ValueAttribution_BucketName_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ValueAttribution_BucketName(v []byte) ValueAttribution_BucketName_Field {
	return ValueAttribution_BucketName_Field{_set: true, _value: v}
}

func (f ValueAttribution_BucketName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ValueAttribution_BucketName_Field) _Column() string { return "bucket_name" }

// ValueAttribution_PartnerId_Field wraps the "partner_id" column ([]byte).
type ValueAttribution_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ValueAttribution_PartnerId(v []byte) ValueAttribution_PartnerId_Field {
	return ValueAttribution_PartnerId_Field{_set: true, _value: v}
}

func (f ValueAttribution_PartnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ValueAttribution_PartnerId_Field) _Column() string { return "partner_id" }

// ValueAttribution_LastUpdated_Field wraps the "last_updated" column (time.Time).
type ValueAttribution_LastUpdated_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func ValueAttribution_LastUpdated(v time.Time) ValueAttribution_LastUpdated_Field {
	return ValueAttribution_LastUpdated_Field{_set: true, _value: v}
}

func (f ValueAttribution_LastUpdated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ValueAttribution_LastUpdated_Field) _Column() string { return "last_updated" }
|
|
|
|
// ApiKey is one row of the "api_keys" table. PartnerId maps a nullable
// column.
type ApiKey struct {
	Id        []byte
	ProjectId []byte
	Head      []byte
	Name      string
	Secret    []byte
	PartnerId []byte
	CreatedAt time.Time
}

func (ApiKey) _Table() string { return "api_keys" }

// ApiKey_Create_Fields lists the optional columns that may be supplied at
// insert time.
type ApiKey_Create_Fields struct {
	PartnerId ApiKey_PartnerId_Field
}

// ApiKey_Update_Fields lists the columns that may be updated; only Name
// is updatable.
type ApiKey_Update_Fields struct {
	Name ApiKey_Name_Field
}

// ApiKey_Id_Field wraps the "id" column ([]byte).
type ApiKey_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_Id(v []byte) ApiKey_Id_Field {
	return ApiKey_Id_Field{_set: true, _value: v}
}

func (f ApiKey_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Id_Field) _Column() string { return "id" }

// ApiKey_ProjectId_Field wraps the "project_id" column ([]byte).
type ApiKey_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_ProjectId(v []byte) ApiKey_ProjectId_Field {
	return ApiKey_ProjectId_Field{_set: true, _value: v}
}

func (f ApiKey_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_ProjectId_Field) _Column() string { return "project_id" }

// ApiKey_Head_Field wraps the "head" column ([]byte).
type ApiKey_Head_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_Head(v []byte) ApiKey_Head_Field {
	return ApiKey_Head_Field{_set: true, _value: v}
}

func (f ApiKey_Head_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Head_Field) _Column() string { return "head" }

// ApiKey_Name_Field wraps the "name" column (string).
type ApiKey_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

func ApiKey_Name(v string) ApiKey_Name_Field {
	return ApiKey_Name_Field{_set: true, _value: v}
}

func (f ApiKey_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Name_Field) _Column() string { return "name" }

// ApiKey_Secret_Field wraps the "secret" column ([]byte).
type ApiKey_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_Secret(v []byte) ApiKey_Secret_Field {
	return ApiKey_Secret_Field{_set: true, _value: v}
}

func (f ApiKey_Secret_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Secret_Field) _Column() string { return "secret" }

// ApiKey_PartnerId_Field wraps the nullable "partner_id" column ([]byte).
type ApiKey_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func ApiKey_PartnerId(v []byte) ApiKey_PartnerId_Field {
	return ApiKey_PartnerId_Field{_set: true, _value: v}
}

// ApiKey_PartnerId_Raw maps a nil slice to an explicit NULL.
func ApiKey_PartnerId_Raw(v []byte) ApiKey_PartnerId_Field {
	if v != nil {
		return ApiKey_PartnerId(v)
	}
	return ApiKey_PartnerId_Null()
}

// ApiKey_PartnerId_Null produces an explicit NULL for the column.
func ApiKey_PartnerId_Null() ApiKey_PartnerId_Field {
	return ApiKey_PartnerId_Field{_set: true, _null: true}
}

func (f ApiKey_PartnerId_Field) isnull() bool {
	if !f._set {
		return true
	}
	return f._null || f._value == nil
}

func (f ApiKey_PartnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_PartnerId_Field) _Column() string { return "partner_id" }

// ApiKey_CreatedAt_Field wraps the "created_at" column (time.Time).
type ApiKey_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func ApiKey_CreatedAt(v time.Time) ApiKey_CreatedAt_Field {
	return ApiKey_CreatedAt_Field{_set: true, _value: v}
}

func (f ApiKey_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// BucketMetainfo is the model struct for one row of the bucket_metainfos
// table.
type BucketMetainfo struct {
	Id        []byte
	ProjectId []byte
	Name      []byte
	// PartnerId is optional at creation time (see
	// BucketMetainfo_Create_Fields) and has a _Null constructor, so it maps
	// to a nullable column.
	PartnerId                       []byte
	PathCipher                      int
	CreatedAt                       time.Time
	DefaultSegmentSize              int
	DefaultEncryptionCipherSuite    int
	DefaultEncryptionBlockSize      int
	DefaultRedundancyAlgorithm      int
	DefaultRedundancyShareSize      int
	DefaultRedundancyRequiredShares int
	DefaultRedundancyRepairShares   int
	DefaultRedundancyOptimalShares  int
	DefaultRedundancyTotalShares    int
}

// _Table returns the SQL table name backing BucketMetainfo.
func (BucketMetainfo) _Table() string { return "bucket_metainfos" }

// BucketMetainfo_Create_Fields holds the optional columns that may be
// supplied when creating a BucketMetainfo row.
type BucketMetainfo_Create_Fields struct {
	PartnerId BucketMetainfo_PartnerId_Field
}

// BucketMetainfo_Update_Fields holds the columns that may be changed by an
// update; each field carries its own set/null flags, so zero-valued fields
// are distinguishable from explicitly assigned ones.
type BucketMetainfo_Update_Fields struct {
	PartnerId                       BucketMetainfo_PartnerId_Field
	DefaultSegmentSize              BucketMetainfo_DefaultSegmentSize_Field
	DefaultEncryptionCipherSuite    BucketMetainfo_DefaultEncryptionCipherSuite_Field
	DefaultEncryptionBlockSize      BucketMetainfo_DefaultEncryptionBlockSize_Field
	DefaultRedundancyAlgorithm      BucketMetainfo_DefaultRedundancyAlgorithm_Field
	DefaultRedundancyShareSize      BucketMetainfo_DefaultRedundancyShareSize_Field
	DefaultRedundancyRequiredShares BucketMetainfo_DefaultRedundancyRequiredShares_Field
	DefaultRedundancyRepairShares   BucketMetainfo_DefaultRedundancyRepairShares_Field
	DefaultRedundancyOptimalShares  BucketMetainfo_DefaultRedundancyOptimalShares_Field
	DefaultRedundancyTotalShares    BucketMetainfo_DefaultRedundancyTotalShares_Field
}
|
|
|
|
type BucketMetainfo_Id_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_Id(v []byte) BucketMetainfo_Id_Field {
|
|
return BucketMetainfo_Id_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_Id_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_Id_Field) _Column() string { return "id" }
|
|
|
|
type BucketMetainfo_ProjectId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_ProjectId(v []byte) BucketMetainfo_ProjectId_Field {
|
|
return BucketMetainfo_ProjectId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_ProjectId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_ProjectId_Field) _Column() string { return "project_id" }
|
|
|
|
type BucketMetainfo_Name_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketMetainfo_Name(v []byte) BucketMetainfo_Name_Field {
|
|
return BucketMetainfo_Name_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_Name_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_Name_Field) _Column() string { return "name" }
|
|
|
|
// BucketMetainfo_PartnerId_Field wraps a value for the nullable "partner_id"
// column.
type BucketMetainfo_PartnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketMetainfo_PartnerId constructs a set, non-null partner_id field.
func BucketMetainfo_PartnerId(v []byte) BucketMetainfo_PartnerId_Field {
	return BucketMetainfo_PartnerId_Field{_set: true, _value: v}
}

// BucketMetainfo_PartnerId_Raw constructs a field from a raw value, mapping a
// nil slice to SQL NULL.
func BucketMetainfo_PartnerId_Raw(v []byte) BucketMetainfo_PartnerId_Field {
	if v != nil {
		return BucketMetainfo_PartnerId(v)
	}
	return BucketMetainfo_PartnerId_Null()
}

// BucketMetainfo_PartnerId_Null constructs a set field holding SQL NULL.
func BucketMetainfo_PartnerId_Null() BucketMetainfo_PartnerId_Field {
	return BucketMetainfo_PartnerId_Field{_set: true, _null: true}
}

// isnull reports whether the field would be stored as SQL NULL.
func (f BucketMetainfo_PartnerId_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

// value returns the SQL argument for the field: the wrapped bytes when the
// field is set and non-null, nil otherwise.
func (f BucketMetainfo_PartnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column returns the column name this field binds to.
func (BucketMetainfo_PartnerId_Field) _Column() string { return "partner_id" }
|
|
|
|
type BucketMetainfo_PathCipher_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_PathCipher(v int) BucketMetainfo_PathCipher_Field {
|
|
return BucketMetainfo_PathCipher_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_PathCipher_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_PathCipher_Field) _Column() string { return "path_cipher" }
|
|
|
|
type BucketMetainfo_CreatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func BucketMetainfo_CreatedAt(v time.Time) BucketMetainfo_CreatedAt_Field {
|
|
return BucketMetainfo_CreatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_CreatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type BucketMetainfo_DefaultSegmentSize_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultSegmentSize(v int) BucketMetainfo_DefaultSegmentSize_Field {
|
|
return BucketMetainfo_DefaultSegmentSize_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultSegmentSize_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultSegmentSize_Field) _Column() string { return "default_segment_size" }
|
|
|
|
type BucketMetainfo_DefaultEncryptionCipherSuite_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultEncryptionCipherSuite(v int) BucketMetainfo_DefaultEncryptionCipherSuite_Field {
|
|
return BucketMetainfo_DefaultEncryptionCipherSuite_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultEncryptionCipherSuite_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultEncryptionCipherSuite_Field) _Column() string {
|
|
return "default_encryption_cipher_suite"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultEncryptionBlockSize_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultEncryptionBlockSize(v int) BucketMetainfo_DefaultEncryptionBlockSize_Field {
|
|
return BucketMetainfo_DefaultEncryptionBlockSize_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultEncryptionBlockSize_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultEncryptionBlockSize_Field) _Column() string {
|
|
return "default_encryption_block_size"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultRedundancyAlgorithm_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultRedundancyAlgorithm(v int) BucketMetainfo_DefaultRedundancyAlgorithm_Field {
|
|
return BucketMetainfo_DefaultRedundancyAlgorithm_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultRedundancyAlgorithm_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultRedundancyAlgorithm_Field) _Column() string {
|
|
return "default_redundancy_algorithm"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultRedundancyShareSize_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultRedundancyShareSize(v int) BucketMetainfo_DefaultRedundancyShareSize_Field {
|
|
return BucketMetainfo_DefaultRedundancyShareSize_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultRedundancyShareSize_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultRedundancyShareSize_Field) _Column() string {
|
|
return "default_redundancy_share_size"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultRedundancyRequiredShares_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultRedundancyRequiredShares(v int) BucketMetainfo_DefaultRedundancyRequiredShares_Field {
|
|
return BucketMetainfo_DefaultRedundancyRequiredShares_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultRedundancyRequiredShares_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultRedundancyRequiredShares_Field) _Column() string {
|
|
return "default_redundancy_required_shares"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultRedundancyRepairShares_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultRedundancyRepairShares(v int) BucketMetainfo_DefaultRedundancyRepairShares_Field {
|
|
return BucketMetainfo_DefaultRedundancyRepairShares_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultRedundancyRepairShares_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultRedundancyRepairShares_Field) _Column() string {
|
|
return "default_redundancy_repair_shares"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultRedundancyOptimalShares_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultRedundancyOptimalShares(v int) BucketMetainfo_DefaultRedundancyOptimalShares_Field {
|
|
return BucketMetainfo_DefaultRedundancyOptimalShares_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultRedundancyOptimalShares_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultRedundancyOptimalShares_Field) _Column() string {
|
|
return "default_redundancy_optimal_shares"
|
|
}
|
|
|
|
type BucketMetainfo_DefaultRedundancyTotalShares_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func BucketMetainfo_DefaultRedundancyTotalShares(v int) BucketMetainfo_DefaultRedundancyTotalShares_Field {
|
|
return BucketMetainfo_DefaultRedundancyTotalShares_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketMetainfo_DefaultRedundancyTotalShares_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketMetainfo_DefaultRedundancyTotalShares_Field) _Column() string {
|
|
return "default_redundancy_total_shares"
|
|
}
|
|
|
|
type ProjectMember struct {
|
|
MemberId []byte
|
|
ProjectId []byte
|
|
CreatedAt time.Time
|
|
}
|
|
|
|
func (ProjectMember) _Table() string { return "project_members" }
|
|
|
|
type ProjectMember_Update_Fields struct {
|
|
}
|
|
|
|
type ProjectMember_MemberId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func ProjectMember_MemberId(v []byte) ProjectMember_MemberId_Field {
|
|
return ProjectMember_MemberId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f ProjectMember_MemberId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (ProjectMember_MemberId_Field) _Column() string { return "member_id" }
|
|
|
|
type ProjectMember_ProjectId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func ProjectMember_ProjectId(v []byte) ProjectMember_ProjectId_Field {
|
|
return ProjectMember_ProjectId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f ProjectMember_ProjectId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (ProjectMember_ProjectId_Field) _Column() string { return "project_id" }
|
|
|
|
type ProjectMember_CreatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func ProjectMember_CreatedAt(v time.Time) ProjectMember_CreatedAt_Field {
|
|
return ProjectMember_CreatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f ProjectMember_CreatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (ProjectMember_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type StripecoinpaymentsApplyBalanceIntent struct {
|
|
TxId string
|
|
State int
|
|
CreatedAt time.Time
|
|
}
|
|
|
|
func (StripecoinpaymentsApplyBalanceIntent) _Table() string {
|
|
return "stripecoinpayments_apply_balance_intents"
|
|
}
|
|
|
|
type StripecoinpaymentsApplyBalanceIntent_Update_Fields struct {
|
|
State StripecoinpaymentsApplyBalanceIntent_State_Field
|
|
}
|
|
|
|
type StripecoinpaymentsApplyBalanceIntent_TxId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value string
|
|
}
|
|
|
|
func StripecoinpaymentsApplyBalanceIntent_TxId(v string) StripecoinpaymentsApplyBalanceIntent_TxId_Field {
|
|
return StripecoinpaymentsApplyBalanceIntent_TxId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StripecoinpaymentsApplyBalanceIntent_TxId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StripecoinpaymentsApplyBalanceIntent_TxId_Field) _Column() string { return "tx_id" }
|
|
|
|
type StripecoinpaymentsApplyBalanceIntent_State_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func StripecoinpaymentsApplyBalanceIntent_State(v int) StripecoinpaymentsApplyBalanceIntent_State_Field {
|
|
return StripecoinpaymentsApplyBalanceIntent_State_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StripecoinpaymentsApplyBalanceIntent_State_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StripecoinpaymentsApplyBalanceIntent_State_Field) _Column() string { return "state" }
|
|
|
|
type StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func StripecoinpaymentsApplyBalanceIntent_CreatedAt(v time.Time) StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field {
|
|
return StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StripecoinpaymentsApplyBalanceIntent_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type UserCredit struct {
|
|
Id int
|
|
UserId []byte
|
|
OfferId int
|
|
ReferredBy []byte
|
|
Type string
|
|
CreditsEarnedInCents int
|
|
CreditsUsedInCents int
|
|
ExpiresAt time.Time
|
|
CreatedAt time.Time
|
|
}
|
|
|
|
func (UserCredit) _Table() string { return "user_credits" }
|
|
|
|
type UserCredit_Create_Fields struct {
|
|
ReferredBy UserCredit_ReferredBy_Field
|
|
}
|
|
|
|
type UserCredit_Update_Fields struct {
|
|
CreditsUsedInCents UserCredit_CreditsUsedInCents_Field
|
|
ExpiresAt UserCredit_ExpiresAt_Field
|
|
}
|
|
|
|
type UserCredit_Id_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func UserCredit_Id(v int) UserCredit_Id_Field {
|
|
return UserCredit_Id_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_Id_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_Id_Field) _Column() string { return "id" }
|
|
|
|
type UserCredit_UserId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func UserCredit_UserId(v []byte) UserCredit_UserId_Field {
|
|
return UserCredit_UserId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_UserId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_UserId_Field) _Column() string { return "user_id" }
|
|
|
|
type UserCredit_OfferId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func UserCredit_OfferId(v int) UserCredit_OfferId_Field {
|
|
return UserCredit_OfferId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_OfferId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_OfferId_Field) _Column() string { return "offer_id" }
|
|
|
|
type UserCredit_ReferredBy_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func UserCredit_ReferredBy(v []byte) UserCredit_ReferredBy_Field {
|
|
return UserCredit_ReferredBy_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func UserCredit_ReferredBy_Raw(v []byte) UserCredit_ReferredBy_Field {
|
|
if v == nil {
|
|
return UserCredit_ReferredBy_Null()
|
|
}
|
|
return UserCredit_ReferredBy(v)
|
|
}
|
|
|
|
func UserCredit_ReferredBy_Null() UserCredit_ReferredBy_Field {
|
|
return UserCredit_ReferredBy_Field{_set: true, _null: true}
|
|
}
|
|
|
|
func (f UserCredit_ReferredBy_Field) isnull() bool { return !f._set || f._null || f._value == nil }
|
|
|
|
func (f UserCredit_ReferredBy_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_ReferredBy_Field) _Column() string { return "referred_by" }
|
|
|
|
type UserCredit_Type_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value string
|
|
}
|
|
|
|
func UserCredit_Type(v string) UserCredit_Type_Field {
|
|
return UserCredit_Type_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_Type_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_Type_Field) _Column() string { return "type" }
|
|
|
|
type UserCredit_CreditsEarnedInCents_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func UserCredit_CreditsEarnedInCents(v int) UserCredit_CreditsEarnedInCents_Field {
|
|
return UserCredit_CreditsEarnedInCents_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_CreditsEarnedInCents_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_CreditsEarnedInCents_Field) _Column() string { return "credits_earned_in_cents" }
|
|
|
|
type UserCredit_CreditsUsedInCents_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func UserCredit_CreditsUsedInCents(v int) UserCredit_CreditsUsedInCents_Field {
|
|
return UserCredit_CreditsUsedInCents_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_CreditsUsedInCents_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_CreditsUsedInCents_Field) _Column() string { return "credits_used_in_cents" }
|
|
|
|
type UserCredit_ExpiresAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func UserCredit_ExpiresAt(v time.Time) UserCredit_ExpiresAt_Field {
|
|
return UserCredit_ExpiresAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_ExpiresAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_ExpiresAt_Field) _Column() string { return "expires_at" }
|
|
|
|
type UserCredit_CreatedAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func UserCredit_CreatedAt(v time.Time) UserCredit_CreatedAt_Field {
|
|
return UserCredit_CreatedAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f UserCredit_CreatedAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (UserCredit_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
func toUTC(t time.Time) time.Time {
|
|
return t.UTC()
|
|
}
|
|
|
|
// toDate truncates t to minute precision. Despite the name it deliberately
// does NOT truncate to a whole day: keeping the minute portion means
// timezones with non-whole-hour offsets still translate correctly.
func toDate(t time.Time) time.Time {
	// keep the minute portion so that translations between timezones will
	// continue to reflect properly.
	return t.Truncate(time.Minute)
}
|
|
|
|
//
|
|
// runtime support for building sql statements
|
|
//
|
|
|
|
// __sqlbundle_SQL is a renderable fragment of a SQL statement.
type __sqlbundle_SQL interface {
	Render() string

	// private restricts implementations to this package.
	private()
}

// __sqlbundle_Dialect rewrites rendered SQL into the form a specific driver
// expects (e.g. "?" placeholders into "$N").
type __sqlbundle_Dialect interface {
	Rebind(sql string) string
}

// __sqlbundle_RenderOp adjusts how __sqlbundle_Render post-processes the
// rendered statement.
type __sqlbundle_RenderOp int

const (
	// __sqlbundle_NoFlatten disables whitespace flattening of the output.
	__sqlbundle_NoFlatten __sqlbundle_RenderOp = iota
	// __sqlbundle_NoTerminate disables appending the trailing ";".
	__sqlbundle_NoTerminate
)
|
|
|
|
func __sqlbundle_Render(dialect __sqlbundle_Dialect, sql __sqlbundle_SQL, ops ...__sqlbundle_RenderOp) string {
|
|
out := sql.Render()
|
|
|
|
flatten := true
|
|
terminate := true
|
|
for _, op := range ops {
|
|
switch op {
|
|
case __sqlbundle_NoFlatten:
|
|
flatten = false
|
|
case __sqlbundle_NoTerminate:
|
|
terminate = false
|
|
}
|
|
}
|
|
|
|
if flatten {
|
|
out = __sqlbundle_flattenSQL(out)
|
|
}
|
|
if terminate {
|
|
out += ";"
|
|
}
|
|
|
|
return dialect.Rebind(out)
|
|
}
|
|
|
|
// __sqlbundle_flattenSQL collapses every run of spaces, tabs, and newlines in
// x into a single space and trims that whitespace from both ends, so a
// multi-line SQL template renders as one tidy line.
func __sqlbundle_flattenSQL(x string) string {
	// Only these three bytes count as whitespace here; other space-like
	// runes (e.g. '\r') are preserved, matching the template generator.
	isSpace := func(r rune) bool {
		return r == ' ' || r == '\t' || r == '\n'
	}
	return strings.Join(strings.FieldsFunc(x, isSpace), " ")
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_postgres struct{}

// Rebind rewrites "?" placeholders into numbered "$N" placeholders, leaving
// '...' string literals, "..." quoted identifiers, and "--" line comments
// untouched.
func (p __sqlbundle_postgres) Rebind(sql string) string {
	type sqlParseState int
	const (
		sqlParseStart sqlParseState = iota
		sqlParseInStringLiteral
		sqlParseInQuotedIdentifier
		sqlParseInComment
	)

	var out strings.Builder
	out.Grow(len(sql) + 10)

	arg := 1
	state := sqlParseStart
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch state {
		case sqlParseInStringLiteral:
			if ch == '\'' {
				state = sqlParseStart
			}
		case sqlParseInQuotedIdentifier:
			if ch == '"' {
				state = sqlParseStart
			}
		case sqlParseInComment:
			if ch == '\n' {
				state = sqlParseStart
			}
		default: // sqlParseStart
			switch ch {
			case '?':
				// replace the placeholder instead of copying it
				out.WriteByte('$')
				out.WriteString(strconv.Itoa(arg))
				arg++
				continue
			case '-':
				if i+1 < len(sql) && sql[i+1] == '-' {
					state = sqlParseInComment
				}
			case '"':
				state = sqlParseInQuotedIdentifier
			case '\'':
				state = sqlParseInStringLiteral
			}
		}
		out.WriteByte(ch)
	}

	return out.String()
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_sqlite3 struct{}

// Rebind is a no-op for sqlite3: the driver accepts "?" placeholders as-is.
func (s __sqlbundle_sqlite3) Rebind(sql string) string {
	return sql
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_cockroach struct{}

// Rebind rewrites "?" placeholders into numbered "$N" placeholders, leaving
// '...' string literals, "..." quoted identifiers, and "--" line comments
// untouched.
func (p __sqlbundle_cockroach) Rebind(sql string) string {
	type sqlParseState int
	const (
		sqlParseStart sqlParseState = iota
		sqlParseInStringLiteral
		sqlParseInQuotedIdentifier
		sqlParseInComment
	)

	var out strings.Builder
	out.Grow(len(sql) + 10)

	arg := 1
	state := sqlParseStart
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch state {
		case sqlParseInStringLiteral:
			if ch == '\'' {
				state = sqlParseStart
			}
		case sqlParseInQuotedIdentifier:
			if ch == '"' {
				state = sqlParseStart
			}
		case sqlParseInComment:
			if ch == '\n' {
				state = sqlParseStart
			}
		default: // sqlParseStart
			switch ch {
			case '?':
				// replace the placeholder instead of copying it
				out.WriteByte('$')
				out.WriteString(strconv.Itoa(arg))
				arg++
				continue
			case '-':
				if i+1 < len(sql) && sql[i+1] == '-' {
					state = sqlParseInComment
				}
			case '"':
				state = sqlParseInQuotedIdentifier
			case '\'':
				state = sqlParseInStringLiteral
			}
		}
		out.WriteByte(ch)
	}

	return out.String()
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_pgx struct{}

// Rebind rewrites "?" placeholders into numbered "$N" placeholders, leaving
// '...' string literals, "..." quoted identifiers, and "--" line comments
// untouched.
func (p __sqlbundle_pgx) Rebind(sql string) string {
	type sqlParseState int
	const (
		sqlParseStart sqlParseState = iota
		sqlParseInStringLiteral
		sqlParseInQuotedIdentifier
		sqlParseInComment
	)

	var out strings.Builder
	out.Grow(len(sql) + 10)

	arg := 1
	state := sqlParseStart
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch state {
		case sqlParseInStringLiteral:
			if ch == '\'' {
				state = sqlParseStart
			}
		case sqlParseInQuotedIdentifier:
			if ch == '"' {
				state = sqlParseStart
			}
		case sqlParseInComment:
			if ch == '\n' {
				state = sqlParseStart
			}
		default: // sqlParseStart
			switch ch {
			case '?':
				// replace the placeholder instead of copying it
				out.WriteByte('$')
				out.WriteString(strconv.Itoa(arg))
				arg++
				continue
			case '-':
				if i+1 < len(sql) && sql[i+1] == '-' {
					state = sqlParseInComment
				}
			case '"':
				state = sqlParseInQuotedIdentifier
			case '\'':
				state = sqlParseInStringLiteral
			}
		}
		out.WriteByte(ch)
	}

	return out.String()
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_pgxcockroach struct{}

// Rebind rewrites "?" placeholders into numbered "$N" placeholders, leaving
// '...' string literals, "..." quoted identifiers, and "--" line comments
// untouched.
func (p __sqlbundle_pgxcockroach) Rebind(sql string) string {
	type sqlParseState int
	const (
		sqlParseStart sqlParseState = iota
		sqlParseInStringLiteral
		sqlParseInQuotedIdentifier
		sqlParseInComment
	)

	var out strings.Builder
	out.Grow(len(sql) + 10)

	arg := 1
	state := sqlParseStart
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch state {
		case sqlParseInStringLiteral:
			if ch == '\'' {
				state = sqlParseStart
			}
		case sqlParseInQuotedIdentifier:
			if ch == '"' {
				state = sqlParseStart
			}
		case sqlParseInComment:
			if ch == '\n' {
				state = sqlParseStart
			}
		default: // sqlParseStart
			switch ch {
			case '?':
				// replace the placeholder instead of copying it
				out.WriteByte('$')
				out.WriteString(strconv.Itoa(arg))
				arg++
				continue
			case '-':
				if i+1 < len(sql) && sql[i+1] == '-' {
					state = sqlParseInComment
				}
			case '"':
				state = sqlParseInQuotedIdentifier
			case '\'':
				state = sqlParseInStringLiteral
			}
		}
		out.WriteByte(ch)
	}

	return out.String()
}
|
|
|
|
// __sqlbundle_Literal is a fixed SQL fragment that renders as itself.
type __sqlbundle_Literal string

func (__sqlbundle_Literal) private() {}

// Render returns the literal SQL text unchanged.
func (l __sqlbundle_Literal) Render() string { return string(l) }
|
|
|
|
type __sqlbundle_Literals struct {
|
|
Join string
|
|
SQLs []__sqlbundle_SQL
|
|
}
|
|
|
|
func (__sqlbundle_Literals) private() {}
|
|
|
|
func (l __sqlbundle_Literals) Render() string {
|
|
var out bytes.Buffer
|
|
|
|
first := true
|
|
for _, sql := range l.SQLs {
|
|
if sql == nil {
|
|
continue
|
|
}
|
|
if !first {
|
|
out.WriteString(l.Join)
|
|
}
|
|
first = false
|
|
out.WriteString(sql.Render())
|
|
}
|
|
|
|
return out.String()
|
|
}
|
|
|
|
// __sqlbundle_Condition renders an equality/inequality comparison, switching
// to "is null"/"is not null" form when the runtime Null flag is set.
type __sqlbundle_Condition struct {
	// set at compile/embed time
	Name  string
	Left  string
	Equal bool
	Right string

	// set at runtime
	Null bool
}

func (*__sqlbundle_Condition) private() {}

// Render returns the SQL comparison text for the condition.
func (c *__sqlbundle_Condition) Render() string {
	// TODO(jeff): maybe check if we can use placeholders instead of the
	// literal null: this would make the templates easier.

	if c.Null {
		if c.Equal {
			return c.Left + " is null"
		}
		return c.Left + " is not null"
	}
	if c.Equal {
		return c.Left + " = " + c.Right
	}
	return c.Left + " != " + c.Right
}
|
|
|
|
type __sqlbundle_Hole struct {
|
|
// set at compiile/embed time
|
|
Name string
|
|
|
|
// set at runtime or possibly embed time
|
|
SQL __sqlbundle_SQL
|
|
}
|
|
|
|
func (*__sqlbundle_Hole) private() {}
|
|
|
|
func (h *__sqlbundle_Hole) Render() string {
|
|
if h.SQL == nil {
|
|
return ""
|
|
}
|
|
return h.SQL.Render()
|
|
}
|
|
|
|
//
|
|
// end runtime support for building sql statements
|
|
//
|
|
|
|
// The *_Row types below hold results of queries that select a subset of
// columns rather than a full model struct; pointer fields correspond to
// nullable columns. The Paged_*_Continuation types carry the key values of
// the last row a paged query returned, with _set reporting whether the token
// holds a resumable position.

// BandwidthLimit_Row holds a selected bandwidth_limit column.
type BandwidthLimit_Row struct {
	BandwidthLimit *int64
}

// BandwidthLimit_UsageLimit_Row holds selected bandwidth and usage limits.
type BandwidthLimit_UsageLimit_Row struct {
	BandwidthLimit *int64
	UsageLimit     *int64
}

// CustomerId_Row holds a selected customer_id column.
type CustomerId_Row struct {
	CustomerId string
}

// Id_PieceCount_Row holds a selected id and piece_count pair.
type Id_PieceCount_Row struct {
	Id         []byte
	PieceCount int64
}

// Id_Row holds a selected id column.
type Id_Row struct {
	Id []byte
}

// LeafSerialNumber_Row holds a selected leaf_serial_number column.
type LeafSerialNumber_Row struct {
	LeafSerialNumber []byte
}

// MaxBuckets_Row holds a selected (nullable) max_buckets column.
type MaxBuckets_Row struct {
	MaxBuckets *int
}

// Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
// resumes paging over bucket bandwidth rollup archive rows.
type Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation struct {
	_value_bucket_name    []byte
	_value_project_id     []byte
	_value_interval_start time.Time
	_value_action         uint
	_set                  bool
}

// Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
// resumes paging over bucket bandwidth rollup rows.
type Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation struct {
	_value_bucket_name    []byte
	_value_project_id     []byte
	_value_interval_start time.Time
	_value_action         uint
	_set                  bool
}

// Paged_Node_Continuation resumes paging over node rows by id.
type Paged_Node_Continuation struct {
	_value_id []byte
	_set      bool
}

// Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
// resumes paging over storagenode bandwidth rollup archive rows.
type Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation struct {
	_value_storagenode_id []byte
	_value_interval_start time.Time
	_value_action         uint
	_set                  bool
}

// Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
// resumes paging over phase-2 storagenode bandwidth rollup rows.
type Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation struct {
	_value_storagenode_id []byte
	_value_interval_start time.Time
	_value_action         uint
	_set                  bool
}

// Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
// resumes paging over storagenode bandwidth rollup rows by interval start.
type Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation struct {
	_value_storagenode_id []byte
	_value_interval_start time.Time
	_value_action         uint
	_set                  bool
}

// Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
// resumes paging over storagenode bandwidth rollup rows by node and interval.
type Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation struct {
	_value_storagenode_id []byte
	_value_interval_start time.Time
	_value_action         uint
	_set                  bool
}

// ProjectLimit_Row holds a selected project_limit column.
type ProjectLimit_Row struct {
	ProjectLimit int
}

// UsageLimit_Row holds a selected (nullable) usage_limit column.
type UsageLimit_Row struct {
	UsageLimit *int64
}

// Value_Row holds a selected timestamp value.
type Value_Row struct {
	Value time.Time
}
|
|
|
|
// Create_ValueAttribution inserts a row into value_attributions and returns
// the created record as scanned back via RETURNING. The last_updated column
// is stamped from the db clock hook (UTC), not the database server clock.
func (obj *pgxImpl) Create_ValueAttribution(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field,
	value_attribution_partner_id ValueAttribution_PartnerId_Field) (
	value_attribution *ValueAttribution, err error) {
	defer mon.Task()(&ctx)(&err)

	// Capture the timestamp once so last_updated is consistent for this call.
	__now := obj.db.Hooks.Now().UTC()
	__project_id_val := value_attribution_project_id.value()
	__bucket_name_val := value_attribution_bucket_name.value()
	__partner_id_val := value_attribution_partner_id.value()
	__last_updated_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, last_updated ) VALUES ( ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated")

	var __values []interface{}
	__values = append(__values, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val)

	// Render the statement for the active dialect (placeholder rewriting) and log it.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	value_attribution = &ValueAttribution{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return value_attribution, nil

}
|
|
|
|
// CreateNoReturn_Irreparabledb inserts a row into irreparabledbs without
// reading the inserted row back (plain Exec, no RETURNING clause).
func (obj *pgxImpl) CreateNoReturn_Irreparabledb(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
	irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
	irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
	irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__segmentpath_val := irreparabledb_segmentpath.value()
	__segmentdetail_val := irreparabledb_segmentdetail.value()
	__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
	__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
	__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? )")

	var __values []interface{}
	__values = append(__values, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_AccountingTimestamps inserts a (name, value) row into
// accounting_timestamps without reading the inserted row back.
func (obj *pgxImpl) CreateNoReturn_AccountingTimestamps(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	accounting_timestamps_value AccountingTimestamps_Value_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__name_val := accounting_timestamps_name.value()
	__value_val := accounting_timestamps_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? )")

	var __values []interface{}
	__values = append(__values, __name_val, __value_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// Create_AuditHistory inserts a row into audit_histories and returns the
// created record as scanned back via RETURNING.
func (obj *pgxImpl) Create_AuditHistory(ctx context.Context,
	audit_history_node_id AuditHistory_NodeId_Field,
	audit_history_history AuditHistory_History_Field) (
	audit_history *AuditHistory, err error) {
	defer mon.Task()(&ctx)(&err)
	__node_id_val := audit_history_node_id.value()
	__history_val := audit_history_history.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO audit_histories ( node_id, history ) VALUES ( ?, ? ) RETURNING audit_histories.node_id, audit_histories.history")

	var __values []interface{}
	__values = append(__values, __node_id_val, __history_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	audit_history = &AuditHistory{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&audit_history.NodeId, &audit_history.History)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return audit_history, nil

}
|
|
|
|
// Create_User inserts a row into users and returns the created record via
// RETURNING. Required columns are always inserted; the optional columns
// project_limit and is_professional are appended to the column/placeholder
// lists only when their _set flags are true, letting the database apply its
// defaults otherwise. created_at is stamped from the db clock hook (UTC)
// and status is hard-coded to 0 (the generated initial status).
func (obj *pgxImpl) Create_User(ctx context.Context,
	user_id User_Id_Field,
	user_email User_Email_Field,
	user_normalized_email User_NormalizedEmail_Field,
	user_full_name User_FullName_Field,
	user_password_hash User_PasswordHash_Field,
	optional User_Create_Fields) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__id_val := user_id.value()
	__email_val := user_email.value()
	__normalized_email_val := user_normalized_email.value()
	__full_name_val := user_full_name.value()
	__short_name_val := optional.ShortName.value()
	__password_hash_val := user_password_hash.value()
	__status_val := int(0)
	__partner_id_val := optional.PartnerId.value()
	__created_at_val := __now
	__position_val := optional.Position.value()
	__company_name_val := optional.CompanyName.value()
	__company_size_val := optional.CompanySize.value()
	__working_on_val := optional.WorkingOn.value()
	__employee_count_val := optional.EmployeeCount.value()

	// Holes allow the optional columns/placeholders to be spliced in below
	// before the statement is rendered.
	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at, position, company_name, company_size, working_on, employee_count")}
	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count")}}

	var __values []interface{}
	__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val)

	__optional_columns := __sqlbundle_Literals{Join: ", "}
	__optional_placeholders := __sqlbundle_Literals{Join: ", "}

	// Only include project_limit when the caller explicitly set it.
	if optional.ProjectLimit._set {
		__values = append(__values, optional.ProjectLimit.value())
		__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("project_limit"))
		__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
	}

	// Only include is_professional when the caller explicitly set it.
	if optional.IsProfessional._set {
		__values = append(__values, optional.IsProfessional.value())
		__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("is_professional"))
		__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
	}

	// Generic emitter: the DEFAULT VALUES branch only applies when there are
	// no base columns at all (not the case for this table).
	if len(__optional_columns.SQLs) == 0 {
		if __columns.SQL == nil {
			__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
		}
	} else {
		__columns.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__columns.SQL, __optional_columns}}
		__placeholders.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__placeholders.SQL, __optional_placeholders}}
	}
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// Create_Project inserts a row into projects and returns the created record
// via RETURNING. Optional fields (usage/bandwidth/rate limits, max_buckets,
// partner_id) are always written; their wrappers yield NULL-capable values
// when unset. created_at is stamped from the db clock hook (UTC).
func (obj *pgxImpl) Create_Project(ctx context.Context,
	project_id Project_Id_Field,
	project_name Project_Name_Field,
	project_description Project_Description_Field,
	project_owner_id Project_OwnerId_Field,
	optional Project_Create_Fields) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__id_val := project_id.value()
	__name_val := project_name.value()
	__description_val := project_description.value()
	__usage_limit_val := optional.UsageLimit.value()
	__bandwidth_limit_val := optional.BandwidthLimit.value()
	__rate_limit_val := optional.RateLimit.value()
	__max_buckets_val := optional.MaxBuckets.value()
	__partner_id_val := optional.PartnerId.value()
	__owner_id_val := project_owner_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, bandwidth_limit, rate_limit, max_buckets, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")

	var __values []interface{}
	__values = append(__values, __id_val, __name_val, __description_val, __usage_limit_val, __bandwidth_limit_val, __rate_limit_val, __max_buckets_val, __partner_id_val, __owner_id_val, __created_at_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// Create_ProjectMember inserts a row into project_members and returns the
// created record via RETURNING. created_at is stamped from the db clock
// hook (UTC).
func (obj *pgxImpl) Create_ProjectMember(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	project_member *ProjectMember, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__member_id_val := project_member_member_id.value()
	__project_id_val := project_member_project_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? ) RETURNING project_members.member_id, project_members.project_id, project_members.created_at")

	var __values []interface{}
	__values = append(__values, __member_id_val, __project_id_val, __created_at_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project_member = &ProjectMember{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project_member, nil

}
|
|
|
|
// Create_ApiKey inserts a row into api_keys and returns the created record
// via RETURNING. The optional partner_id comes from the Create_Fields
// wrapper; created_at is stamped from the db clock hook (UTC).
func (obj *pgxImpl) Create_ApiKey(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	api_key_project_id ApiKey_ProjectId_Field,
	api_key_head ApiKey_Head_Field,
	api_key_name ApiKey_Name_Field,
	api_key_secret ApiKey_Secret_Field,
	optional ApiKey_Create_Fields) (
	api_key *ApiKey, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__id_val := api_key_id.value()
	__project_id_val := api_key_project_id.value()
	__head_val := api_key_head.value()
	__name_val := api_key_name.value()
	__secret_val := api_key_secret.value()
	__partner_id_val := optional.PartnerId.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at")

	var __values []interface{}
	__values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// CreateNoReturn_Revocation inserts a (revoked, api_key_id) row into
// revocations without reading the inserted row back.
func (obj *pgxImpl) CreateNoReturn_Revocation(ctx context.Context,
	revocation_revoked Revocation_Revoked_Field,
	revocation_api_key_id Revocation_ApiKeyId_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__revoked_val := revocation_revoked.value()
	__api_key_id_val := revocation_api_key_id.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO revocations ( revoked, api_key_id ) VALUES ( ?, ? )")

	var __values []interface{}
	__values = append(__values, __revoked_val, __api_key_id_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_BucketStorageTally inserts a row into bucket_storage_tallies
// without reading the inserted row back.
func (obj *pgxImpl) CreateNoReturn_BucketStorageTally(ctx context.Context,
	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
	bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
	bucket_storage_tally_inline BucketStorageTally_Inline_Field,
	bucket_storage_tally_remote BucketStorageTally_Remote_Field,
	bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
	bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
	bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
	bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	__bucket_name_val := bucket_storage_tally_bucket_name.value()
	__project_id_val := bucket_storage_tally_project_id.value()
	__interval_start_val := bucket_storage_tally_interval_start.value()
	__inline_val := bucket_storage_tally_inline.value()
	__remote_val := bucket_storage_tally_remote.value()
	__remote_segments_count_val := bucket_storage_tally_remote_segments_count.value()
	__inline_segments_count_val := bucket_storage_tally_inline_segments_count.value()
	__object_count_val := bucket_storage_tally_object_count.value()
	__metadata_size_val := bucket_storage_tally_metadata_size.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )")

	var __values []interface{}
	__values = append(__values, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// Create_StoragenodeBandwidthRollup inserts a row into
// storagenode_bandwidth_rollups and returns the created record via
// RETURNING. The optional allocated column is appended to the
// column/placeholder lists only when its _set flag is true, letting the
// database apply its default otherwise.
func (obj *pgxImpl) Create_StoragenodeBandwidthRollup(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
	storagenode_bandwidth_rollup_interval_seconds StoragenodeBandwidthRollup_IntervalSeconds_Field,
	storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field,
	storagenode_bandwidth_rollup_settled StoragenodeBandwidthRollup_Settled_Field,
	optional StoragenodeBandwidthRollup_Create_Fields) (
	storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)
	__storagenode_id_val := storagenode_bandwidth_rollup_storagenode_id.value()
	__interval_start_val := storagenode_bandwidth_rollup_interval_start.value()
	__interval_seconds_val := storagenode_bandwidth_rollup_interval_seconds.value()
	__action_val := storagenode_bandwidth_rollup_action.value()
	__settled_val := storagenode_bandwidth_rollup_settled.value()

	// Holes allow the optional column/placeholder to be spliced in below
	// before the statement is rendered.
	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("storagenode_id, interval_start, interval_seconds, action, settled")}
	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?")}
	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO storagenode_bandwidth_rollups "), __clause, __sqlbundle_Literal(" RETURNING storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled")}}

	var __values []interface{}
	__values = append(__values, __storagenode_id_val, __interval_start_val, __interval_seconds_val, __action_val, __settled_val)

	__optional_columns := __sqlbundle_Literals{Join: ", "}
	__optional_placeholders := __sqlbundle_Literals{Join: ", "}

	// Only include allocated when the caller explicitly set it.
	if optional.Allocated._set {
		__values = append(__values, optional.Allocated.value())
		__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("allocated"))
		__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
	}

	// Generic emitter: the DEFAULT VALUES branch only applies when there are
	// no base columns at all (not the case for this table).
	if len(__optional_columns.SQLs) == 0 {
		if __columns.SQL == nil {
			__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
		}
	} else {
		__columns.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__columns.SQL, __optional_columns}}
		__placeholders.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__placeholders.SQL, __optional_placeholders}}
	}
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	storagenode_bandwidth_rollup = &StoragenodeBandwidthRollup{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return storagenode_bandwidth_rollup, nil

}
|
|
|
|
// ReplaceNoReturn_StoragenodePaystub upserts a row into storagenode_paystubs
// keyed on (period, node_id): the INSERT carries an ON CONFLICT ... DO UPDATE
// clause that overwrites every column (including created_at) with the new
// values. Nothing is read back. created_at is stamped from the db clock
// hook (UTC).
func (obj *pgxImpl) ReplaceNoReturn_StoragenodePaystub(ctx context.Context,
	storagenode_paystub_period StoragenodePaystub_Period_Field,
	storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
	storagenode_paystub_codes StoragenodePaystub_Codes_Field,
	storagenode_paystub_usage_at_rest StoragenodePaystub_UsageAtRest_Field,
	storagenode_paystub_usage_get StoragenodePaystub_UsageGet_Field,
	storagenode_paystub_usage_put StoragenodePaystub_UsagePut_Field,
	storagenode_paystub_usage_get_repair StoragenodePaystub_UsageGetRepair_Field,
	storagenode_paystub_usage_put_repair StoragenodePaystub_UsagePutRepair_Field,
	storagenode_paystub_usage_get_audit StoragenodePaystub_UsageGetAudit_Field,
	storagenode_paystub_comp_at_rest StoragenodePaystub_CompAtRest_Field,
	storagenode_paystub_comp_get StoragenodePaystub_CompGet_Field,
	storagenode_paystub_comp_put StoragenodePaystub_CompPut_Field,
	storagenode_paystub_comp_get_repair StoragenodePaystub_CompGetRepair_Field,
	storagenode_paystub_comp_put_repair StoragenodePaystub_CompPutRepair_Field,
	storagenode_paystub_comp_get_audit StoragenodePaystub_CompGetAudit_Field,
	storagenode_paystub_surge_percent StoragenodePaystub_SurgePercent_Field,
	storagenode_paystub_held StoragenodePaystub_Held_Field,
	storagenode_paystub_owed StoragenodePaystub_Owed_Field,
	storagenode_paystub_disposed StoragenodePaystub_Disposed_Field,
	storagenode_paystub_paid StoragenodePaystub_Paid_Field,
	storagenode_paystub_distributed StoragenodePaystub_Distributed_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__period_val := storagenode_paystub_period.value()
	__node_id_val := storagenode_paystub_node_id.value()
	__created_at_val := __now
	__codes_val := storagenode_paystub_codes.value()
	__usage_at_rest_val := storagenode_paystub_usage_at_rest.value()
	__usage_get_val := storagenode_paystub_usage_get.value()
	__usage_put_val := storagenode_paystub_usage_put.value()
	__usage_get_repair_val := storagenode_paystub_usage_get_repair.value()
	__usage_put_repair_val := storagenode_paystub_usage_put_repair.value()
	__usage_get_audit_val := storagenode_paystub_usage_get_audit.value()
	__comp_at_rest_val := storagenode_paystub_comp_at_rest.value()
	__comp_get_val := storagenode_paystub_comp_get.value()
	__comp_put_val := storagenode_paystub_comp_put.value()
	__comp_get_repair_val := storagenode_paystub_comp_get_repair.value()
	__comp_put_repair_val := storagenode_paystub_comp_put_repair.value()
	__comp_get_audit_val := storagenode_paystub_comp_get_audit.value()
	__surge_percent_val := storagenode_paystub_surge_percent.value()
	__held_val := storagenode_paystub_held.value()
	__owed_val := storagenode_paystub_owed.value()
	__disposed_val := storagenode_paystub_disposed.value()
	__paid_val := storagenode_paystub_paid.value()
	__distributed_val := storagenode_paystub_distributed.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO storagenode_paystubs ( period, node_id, created_at, codes, usage_at_rest, usage_get, usage_put, usage_get_repair, usage_put_repair, usage_get_audit, comp_at_rest, comp_get, comp_put, comp_get_repair, comp_put_repair, comp_get_audit, surge_percent, held, owed, disposed, paid, distributed ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) ON CONFLICT ( period, node_id ) DO UPDATE SET period = EXCLUDED.period, node_id = EXCLUDED.node_id, created_at = EXCLUDED.created_at, codes = EXCLUDED.codes, usage_at_rest = EXCLUDED.usage_at_rest, usage_get = EXCLUDED.usage_get, usage_put = EXCLUDED.usage_put, usage_get_repair = EXCLUDED.usage_get_repair, usage_put_repair = EXCLUDED.usage_put_repair, usage_get_audit = EXCLUDED.usage_get_audit, comp_at_rest = EXCLUDED.comp_at_rest, comp_get = EXCLUDED.comp_get, comp_put = EXCLUDED.comp_put, comp_get_repair = EXCLUDED.comp_get_repair, comp_put_repair = EXCLUDED.comp_put_repair, comp_get_audit = EXCLUDED.comp_get_audit, surge_percent = EXCLUDED.surge_percent, held = EXCLUDED.held, owed = EXCLUDED.owed, disposed = EXCLUDED.disposed, paid = EXCLUDED.paid, distributed = EXCLUDED.distributed")

	var __values []interface{}
	__values = append(__values, __period_val, __node_id_val, __created_at_val, __codes_val, __usage_at_rest_val, __usage_get_val, __usage_put_val, __usage_get_repair_val, __usage_put_repair_val, __usage_get_audit_val, __comp_at_rest_val, __comp_get_val, __comp_put_val, __comp_get_repair_val, __comp_put_repair_val, __comp_get_audit_val, __surge_percent_val, __held_val, __owed_val, __disposed_val, __paid_val, __distributed_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_StoragenodePayment inserts a row into storagenode_payments
// without reading the inserted row back. Optional receipt/notes come from the
// Create_Fields wrapper; created_at is stamped from the db clock hook (UTC).
func (obj *pgxImpl) CreateNoReturn_StoragenodePayment(ctx context.Context,
	storagenode_payment_node_id StoragenodePayment_NodeId_Field,
	storagenode_payment_period StoragenodePayment_Period_Field,
	storagenode_payment_amount StoragenodePayment_Amount_Field,
	optional StoragenodePayment_Create_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__created_at_val := __now
	__node_id_val := storagenode_payment_node_id.value()
	__period_val := storagenode_payment_period.value()
	__amount_val := storagenode_payment_amount.value()
	__receipt_val := optional.Receipt.value()
	__notes_val := optional.Notes.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO storagenode_payments ( created_at, node_id, period, amount, receipt, notes ) VALUES ( ?, ?, ?, ?, ?, ? )")

	var __values []interface{}
	__values = append(__values, __created_at_val, __node_id_val, __period_val, __amount_val, __receipt_val, __notes_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// CreateNoReturn_PeerIdentity inserts a row into peer_identities without
// reading the inserted row back. updated_at is stamped from the db clock
// hook (UTC).
func (obj *pgxImpl) CreateNoReturn_PeerIdentity(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
	peer_identity_chain PeerIdentity_Chain_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := peer_identity_node_id.value()
	__leaf_serial_number_val := peer_identity_leaf_serial_number.value()
	__chain_val := peer_identity_chain.value()
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? )")

	var __values []interface{}
	__values = append(__values, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// Create_RegistrationToken inserts a row into registration_tokens and
// returns the created record via RETURNING. The optional owner_id comes from
// the Create_Fields wrapper; created_at is stamped from the db clock hook
// (UTC).
func (obj *pgxImpl) Create_RegistrationToken(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	registration_token_project_limit RegistrationToken_ProjectLimit_Field,
	optional RegistrationToken_Create_Fields) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__secret_val := registration_token_secret.value()
	__owner_id_val := optional.OwnerId.value()
	__project_limit_val := registration_token_project_limit.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")

	var __values []interface{}
	__values = append(__values, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Create_ResetPasswordToken inserts a row into reset_password_tokens and
// returns the created record via RETURNING. created_at is stamped from the
// db clock hook (UTC).
func (obj *pgxImpl) Create_ResetPasswordToken(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field,
	reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__secret_val := reset_password_token_secret.value()
	__owner_id_val := reset_password_token_owner_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO reset_password_tokens ( secret, owner_id, created_at ) VALUES ( ?, ?, ? ) RETURNING reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at")

	var __values []interface{}
	__values = append(__values, __secret_val, __owner_id_val, __created_at_val)

	// Render for the active dialect and log before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
func (obj *pgxImpl) Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := bucket_metainfo_id.value()
|
|
__project_id_val := bucket_metainfo_project_id.value()
|
|
__name_val := bucket_metainfo_name.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__path_cipher_val := bucket_metainfo_path_cipher.value()
|
|
__created_at_val := __now
|
|
__default_segment_size_val := bucket_metainfo_default_segment_size.value()
|
|
__default_encryption_cipher_suite_val := bucket_metainfo_default_encryption_cipher_suite.value()
|
|
__default_encryption_block_size_val := bucket_metainfo_default_encryption_block_size.value()
|
|
__default_redundancy_algorithm_val := bucket_metainfo_default_redundancy_algorithm.value()
|
|
__default_redundancy_share_size_val := bucket_metainfo_default_redundancy_share_size.value()
|
|
__default_redundancy_required_shares_val := bucket_metainfo_default_redundancy_required_shares.value()
|
|
__default_redundancy_repair_shares_val := bucket_metainfo_default_redundancy_repair_shares.value()
|
|
__default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value()
|
|
__default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
bucket_metainfo = &BucketMetainfo{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bucket_metainfo, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__user_id_val := stripe_customer_user_id.value()
|
|
__customer_id_val := stripe_customer_customer_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripe_customers ( user_id, customer_id, created_at ) VALUES ( ?, ?, ? ) RETURNING stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __user_id_val, __customer_id_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripe_customer = &StripeCustomer{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripe_customer, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := coinpayments_transaction_id.value()
|
|
__user_id_val := coinpayments_transaction_user_id.value()
|
|
__address_val := coinpayments_transaction_address.value()
|
|
__amount_val := coinpayments_transaction_amount.value()
|
|
__received_val := coinpayments_transaction_received.value()
|
|
__status_val := coinpayments_transaction_status.value()
|
|
__key_val := coinpayments_transaction_key.value()
|
|
__timeout_val := coinpayments_transaction_timeout.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO coinpayments_transactions ( id, user_id, address, amount, received, status, key, timeout, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __timeout_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coinpayments_transaction = &CoinpaymentsTransaction{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coinpayments_transaction, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := stripecoinpayments_invoice_project_record_id.value()
|
|
__project_id_val := stripecoinpayments_invoice_project_record_project_id.value()
|
|
__storage_val := stripecoinpayments_invoice_project_record_storage.value()
|
|
__egress_val := stripecoinpayments_invoice_project_record_egress.value()
|
|
__objects_val := stripecoinpayments_invoice_project_record_objects.value()
|
|
__period_start_val := stripecoinpayments_invoice_project_record_period_start.value()
|
|
__period_end_val := stripecoinpayments_invoice_project_record_period_end.value()
|
|
__state_val := stripecoinpayments_invoice_project_record_state.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_invoice_project_records ( id, project_id, storage, egress, objects, period_start, period_end, state, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __project_id_val, __storage_val, __egress_val, __objects_val, __period_start_val, __period_end_val, __state_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
|
|
stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__tx_id_val := stripecoinpayments_tx_conversion_rate_tx_id.value()
|
|
__rate_val := stripecoinpayments_tx_conversion_rate_rate.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_tx_conversion_rates ( tx_id, rate, created_at ) VALUES ( ?, ?, ? ) RETURNING stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __tx_id_val, __rate_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_tx_conversion_rate, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_CouponCode(ctx context.Context,
|
|
coupon_code_id CouponCode_Id_Field,
|
|
coupon_code_name CouponCode_Name_Field,
|
|
coupon_code_amount CouponCode_Amount_Field,
|
|
coupon_code_description CouponCode_Description_Field,
|
|
coupon_code_type CouponCode_Type_Field,
|
|
optional CouponCode_Create_Fields) (
|
|
coupon_code *CouponCode, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := coupon_code_id.value()
|
|
__name_val := coupon_code_name.value()
|
|
__amount_val := coupon_code_amount.value()
|
|
__description_val := coupon_code_description.value()
|
|
__type_val := coupon_code_type.value()
|
|
__billing_periods_val := optional.BillingPeriods.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupon_codes ( id, name, amount, description, type, billing_periods, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING coupon_codes.id, coupon_codes.name, coupon_codes.amount, coupon_codes.description, coupon_codes.type, coupon_codes.billing_periods, coupon_codes.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __name_val, __amount_val, __description_val, __type_val, __billing_periods_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon_code = &CouponCode{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_code.Id, &coupon_code.Name, &coupon_code.Amount, &coupon_code.Description, &coupon_code.Type, &coupon_code.BillingPeriods, &coupon_code.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coupon_code, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_Coupon(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_amount Coupon_Amount_Field,
|
|
coupon_description Coupon_Description_Field,
|
|
coupon_type Coupon_Type_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
coupon_duration Coupon_Duration_Field,
|
|
optional Coupon_Create_Fields) (
|
|
coupon *Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := coupon_id.value()
|
|
__user_id_val := coupon_user_id.value()
|
|
__amount_val := coupon_amount.value()
|
|
__description_val := coupon_description.value()
|
|
__type_val := coupon_type.value()
|
|
__status_val := coupon_status.value()
|
|
__duration_val := coupon_duration.value()
|
|
__billing_periods_val := optional.BillingPeriods.value()
|
|
__coupon_code_name_val := optional.CouponCodeName.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupons ( id, user_id, amount, description, type, status, duration, billing_periods, coupon_code_name, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __user_id_val, __amount_val, __description_val, __type_val, __status_val, __duration_val, __billing_periods_val, __coupon_code_name_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon = &Coupon{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coupon, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Create_CouponUsage(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_amount CouponUsage_Amount_Field,
|
|
coupon_usage_status CouponUsage_Status_Field,
|
|
coupon_usage_period CouponUsage_Period_Field) (
|
|
coupon_usage *CouponUsage, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__coupon_id_val := coupon_usage_coupon_id.value()
|
|
__amount_val := coupon_usage_amount.value()
|
|
__status_val := coupon_usage_status.value()
|
|
__period_val := coupon_usage_period.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupon_usages ( coupon_id, amount, status, period ) VALUES ( ?, ?, ?, ? ) RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __coupon_id_val, __amount_val, __status_val, __period_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon_usage = &CouponUsage{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coupon_usage, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) ReplaceNoReturn_NodeApiVersion(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version NodeApiVersion_ApiVersion_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := node_api_version_id.value()
|
|
__api_version_val := node_api_version_api_version.value()
|
|
__created_at_val := __now
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO node_api_versions ( id, api_version, created_at, updated_at ) VALUES ( ?, ?, ?, ? ) ON CONFLICT ( id ) DO UPDATE SET id = EXCLUDED.id, api_version = EXCLUDED.api_version, created_at = EXCLUDED.created_at, updated_at = EXCLUDED.updated_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __api_version_val, __created_at_val, __updated_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
value_attribution *ValueAttribution, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
value_attribution = &ValueAttribution{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
|
|
if err != nil {
|
|
return (*ValueAttribution)(nil), obj.makeErr(err)
|
|
}
|
|
return value_attribution, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
pending_audits *PendingAudits, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE pending_audits.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, pending_audits_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
pending_audits = &PendingAudits{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
|
|
if err != nil {
|
|
return (*PendingAudits)(nil), obj.makeErr(err)
|
|
}
|
|
return pending_audits, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
irreparabledb = &Irreparabledb{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err != nil {
|
|
return (*Irreparabledb)(nil), obj.makeErr(err)
|
|
}
|
|
return irreparabledb, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath > ? ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath_greater.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Irreparabledb, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
irreparabledb := &Irreparabledb{}
|
|
err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, irreparabledb)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_timestamps_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &Value_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.Value)
|
|
if err == sql.ErrNoRows {
|
|
return (*Value_Row)(nil), nil
|
|
}
|
|
if err != nil {
|
|
return (*Value_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
|
|
if err != nil {
|
|
return (*Node)(nil), obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Id_Row, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
row := &Id_Row{}
|
|
err = __rows.Scan(&row.Id)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, row)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Paged_Node(ctx context.Context,
|
|
limit int, start *Paged_Node_Continuation) (
|
|
rows []*Node, next *Paged_Node_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success, nodes.id FROM nodes WHERE (nodes.id) > ? ORDER BY nodes.id LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success, nodes.id FROM nodes ORDER BY nodes.id LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_id, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*Node, next *Paged_Node_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_Node_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
node := &Node{}
|
|
err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess, &__continuation._value_id)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, node)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
|
|
rows []*Id_PieceCount_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.piece_count FROM nodes WHERE nodes.piece_count != 0")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Id_PieceCount_Row, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
row := &Id_PieceCount_Row{}
|
|
err = __rows.Scan(&row.Id, &row.PieceCount)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, row)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_AuditHistory_By_NodeId(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field) (
|
|
audit_history *AuditHistory, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT audit_histories.node_id, audit_histories.history FROM audit_histories WHERE audit_histories.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, audit_history_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
audit_history = &AuditHistory{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&audit_history.NodeId, &audit_history.History)
|
|
if err != nil {
|
|
return (*AuditHistory)(nil), obj.makeErr(err)
|
|
}
|
|
return audit_history, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
|
|
user_normalized_email User_NormalizedEmail_Field) (
|
|
user *User, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_normalized_email.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
user, err = func() (user *User, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
if !__rows.Next() {
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return nil, sql.ErrNoRows
|
|
}
|
|
|
|
user = &User{}
|
|
err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
if __rows.Next() {
|
|
return nil, errTooManyRows
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return user, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
if err == errTooManyRows {
|
|
return nil, tooManyRows("User_By_NormalizedEmail_And_Status_Not_Number")
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count FROM users WHERE users.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
user = &User{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
|
|
if err != nil {
|
|
return (*User)(nil), obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_User_ProjectLimit_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
row *ProjectLimit_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT users.project_limit FROM users WHERE users.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &ProjectLimit_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ProjectLimit)
|
|
if err != nil {
|
|
return (*ProjectLimit_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return (*Project)(nil), obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *UsageLimit_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.usage_limit FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &UsageLimit_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.UsageLimit)
|
|
if err != nil {
|
|
return (*UsageLimit_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Project_BandwidthLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *BandwidthLimit_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &BandwidthLimit_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit)
|
|
if err != nil {
|
|
return (*BandwidthLimit_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *MaxBuckets_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.max_buckets FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &MaxBuckets_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.MaxBuckets)
|
|
if err != nil {
|
|
return (*MaxBuckets_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *BandwidthLimit_UsageLimit_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit, projects.usage_limit FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &BandwidthLimit_UsageLimit_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit, &row.UsageLimit)
|
|
if err != nil {
|
|
return (*BandwidthLimit_UsageLimit_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Project(ctx context.Context) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_created_at_less.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_owner_id Project_OwnerId_Field) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_owner_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_created_at_less.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*ProjectMember, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project_member := &ProjectMember{}
|
|
err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project_member)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return (*ApiKey)(nil), obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_ApiKey_By_Head(ctx context.Context,
|
|
api_key_head ApiKey_Head_Field) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_head.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return (*ApiKey)(nil), obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.name = ? AND api_keys.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_name.value(), api_key_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return (*ApiKey)(nil), obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
// Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual reads one page
// (up to limit rows) of bucket_bandwidth_rollups whose interval_start is at
// or after the given value, keyset-paginated on the composite key
// (bucket_name, project_id, interval_start, action). Pass start=nil (or an
// unset continuation) for the first page; pass the returned next token for
// subsequent pages. next is nil when the page was empty.
func (obj *pgxImpl) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	// Statement used when resuming from a continuation: the key columns are
	// selected twice — once for the row struct, once for the next token —
	// and the tuple comparison skips everything at or before the token.
	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? AND (bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

	// Statement for the first page (no continuation filter).
	var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_interval_start_greater_or_equal.value())

	// Choose the statement and bind the continuation key only when a valid
	// (set) token was supplied.
	var __stmt string
	if start != nil && start._set {
		__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	for {
		rows, next, err = func() (rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			// A single continuation struct is shared by all iterations:
			// every Scan overwrites its key fields, so after the loop it
			// holds the key of the LAST row — exactly the next-page token.
			var __continuation Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				bucket_bandwidth_rollup := &BucketBandwidthRollup{}
				err = __rows.Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, bucket_bandwidth_rollup)
				// next stays nil for an empty page; otherwise it aliases
				// the shared continuation above.
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			// Retryable errors re-run the whole page; everything else is
			// wrapped and returned.
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}

}
|
|
|
|
// Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual reads
// one page (up to limit rows) of bucket_bandwidth_rollup_archives whose
// interval_start is at or after the given value, keyset-paginated on the
// composite key (bucket_name, project_id, interval_start, action). Pass
// start=nil (or an unset continuation) for the first page; pass the returned
// next token for subsequent pages. next is nil when the page was empty.
func (obj *pgxImpl) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	// Statement used when resuming from a continuation: the key columns are
	// selected twice — once for the row struct, once for the next token —
	// and the tuple comparison skips everything at or before the token.
	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? AND (bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

	// Statement for the first page (no continuation filter).
	var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal.value())

	// Choose the statement and bind the continuation key only when a valid
	// (set) token was supplied.
	var __stmt string
	if start != nil && start._set {
		__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	for {
		rows, next, err = func() (rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			// A single continuation struct is shared by all iterations:
			// every Scan overwrites its key fields, so after the loop it
			// holds the key of the LAST row — exactly the next-page token.
			var __continuation Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				bucket_bandwidth_rollup_archive := &BucketBandwidthRollupArchive{}
				err = __rows.Scan(&bucket_bandwidth_rollup_archive.BucketName, &bucket_bandwidth_rollup_archive.ProjectId, &bucket_bandwidth_rollup_archive.IntervalStart, &bucket_bandwidth_rollup_archive.IntervalSeconds, &bucket_bandwidth_rollup_archive.Action, &bucket_bandwidth_rollup_archive.Inline, &bucket_bandwidth_rollup_archive.Allocated, &bucket_bandwidth_rollup_archive.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, bucket_bandwidth_rollup_archive)
				// next stays nil for an empty page; otherwise it aliases
				// the shared continuation above.
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			// Retryable errors re-run the whole page; everything else is
			// wrapped and returned.
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}

}
|
|
|
|
func (obj *pgxImpl) All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketStorageTally, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_storage_tally := &BucketStorageTally{}
|
|
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
|
rows []*BucketStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketStorageTally, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_storage_tally := &BucketStorageTally{}
|
|
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
|
|
rows []*StoragenodeBandwidthRollup, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodeBandwidthRollup, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ? AND (storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ? ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start >= ? AND (storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start >= ? ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal StoragenodeBandwidthRollupArchive_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollupArchive, next *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.interval_seconds, storagenode_bandwidth_rollup_archives.action, storagenode_bandwidth_rollup_archives.allocated, storagenode_bandwidth_rollup_archives.settled, storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action FROM storagenode_bandwidth_rollup_archives WHERE storagenode_bandwidth_rollup_archives.interval_start >= ? AND (storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.interval_seconds, storagenode_bandwidth_rollup_archives.action, storagenode_bandwidth_rollup_archives.allocated, storagenode_bandwidth_rollup_archives.settled, storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action FROM storagenode_bandwidth_rollup_archives WHERE storagenode_bandwidth_rollup_archives.interval_start >= ? ORDER BY storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*StoragenodeBandwidthRollupArchive, next *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup_archive := &StoragenodeBandwidthRollupArchive{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup_archive.StoragenodeId, &storagenode_bandwidth_rollup_archive.IntervalStart, &storagenode_bandwidth_rollup_archive.IntervalSeconds, &storagenode_bandwidth_rollup_archive.Action, &storagenode_bandwidth_rollup_archive.Allocated, &storagenode_bandwidth_rollup_archive.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup_archive)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_phase2_storagenode_id StoragenodeBandwidthRollupPhase2_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal StoragenodeBandwidthRollupPhase2_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollupPhase2, next *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.interval_seconds, storagenode_bandwidth_rollups_phase2.action, storagenode_bandwidth_rollups_phase2.allocated, storagenode_bandwidth_rollups_phase2.settled, storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action FROM storagenode_bandwidth_rollups_phase2 WHERE storagenode_bandwidth_rollups_phase2.storagenode_id = ? AND storagenode_bandwidth_rollups_phase2.interval_start >= ? AND (storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.interval_seconds, storagenode_bandwidth_rollups_phase2.action, storagenode_bandwidth_rollups_phase2.allocated, storagenode_bandwidth_rollups_phase2.settled, storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action FROM storagenode_bandwidth_rollups_phase2 WHERE storagenode_bandwidth_rollups_phase2.storagenode_id = ? AND storagenode_bandwidth_rollups_phase2.interval_start >= ? ORDER BY storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_phase2_storagenode_id.value(), storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*StoragenodeBandwidthRollupPhase2, next *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup_phase2 := &StoragenodeBandwidthRollupPhase2{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup_phase2.StoragenodeId, &storagenode_bandwidth_rollup_phase2.IntervalStart, &storagenode_bandwidth_rollup_phase2.IntervalSeconds, &storagenode_bandwidth_rollup_phase2.Action, &storagenode_bandwidth_rollup_phase2.Allocated, &storagenode_bandwidth_rollup_phase2.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup_phase2)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_StoragenodeStorageTally(ctx context.Context) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodeStorageTally, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_storage_tally := &StoragenodeStorageTally{}
|
|
err = __rows.Scan(&storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.interval_end_time >= ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_storage_tally_interval_end_time_greater_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodeStorageTally, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_storage_tally := &StoragenodeStorageTally{}
|
|
err = __rows.Scan(&storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
|
|
storagenode_paystub_period StoragenodePaystub_Period_Field) (
|
|
storagenode_paystub *StoragenodePaystub, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid, storagenode_paystubs.distributed FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ? AND storagenode_paystubs.period = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_paystub_node_id.value(), storagenode_paystub_period.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
storagenode_paystub = &StoragenodePaystub{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid, &storagenode_paystub.Distributed)
|
|
if err != nil {
|
|
return (*StoragenodePaystub)(nil), obj.makeErr(err)
|
|
}
|
|
return storagenode_paystub, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_StoragenodePaystub_By_NodeId(ctx context.Context,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
|
|
rows []*StoragenodePaystub, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid, storagenode_paystubs.distributed FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_paystub_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodePaystub, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_paystub := &StoragenodePaystub{}
|
|
err = __rows.Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid, &storagenode_paystub.Distributed)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_paystub)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*StoragenodePayment, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ? ORDER BY storagenode_payments.id DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodePayment, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_payment := &StoragenodePayment{}
|
|
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_payment)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_StoragenodePayment_By_NodeId(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
|
|
rows []*StoragenodePayment, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_payment_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodePayment, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_payment := &StoragenodePayment{}
|
|
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_payment)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_StoragenodePayment_By_NodeId_And_Period(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field) (
|
|
rows []*StoragenodePayment, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodePayment, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_payment := &StoragenodePayment{}
|
|
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_payment)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
peer_identity *PeerIdentity, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, peer_identity_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
peer_identity = &PeerIdentity{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
|
|
if err != nil {
|
|
return (*PeerIdentity)(nil), obj.makeErr(err)
|
|
}
|
|
return peer_identity, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
row *LeafSerialNumber_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, peer_identity_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &LeafSerialNumber_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.LeafSerialNumber)
|
|
if err != nil {
|
|
return (*LeafSerialNumber_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, registration_token_secret.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return (*RegistrationToken)(nil), obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}}
|
|
|
|
var __values []interface{}
|
|
if !registration_token_owner_id.isnull() {
|
|
__cond_0.Null = false
|
|
__values = append(__values, registration_token_owner_id.value())
|
|
}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return (*RegistrationToken)(nil), obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, reset_password_token_secret.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
reset_password_token = &ResetPasswordToken{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
|
|
if err != nil {
|
|
return (*ResetPasswordToken)(nil), obj.makeErr(err)
|
|
}
|
|
return reset_password_token, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.owner_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, reset_password_token_owner_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
reset_password_token = &ResetPasswordToken{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
|
|
if err != nil {
|
|
return (*ResetPasswordToken)(nil), obj.makeErr(err)
|
|
}
|
|
return reset_password_token, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
bucket_metainfo = &BucketMetainfo{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return (*BucketMetainfo)(nil), obj.makeErr(err)
|
|
}
|
|
return bucket_metainfo, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
row *Id_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &Id_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.Id)
|
|
if err != nil {
|
|
return (*Id_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Has_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
has bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT EXISTS( SELECT 1 FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&has)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
return has, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketMetainfo, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_metainfo := &BucketMetainfo{}
|
|
err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_metainfo)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketMetainfo, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_metainfo := &BucketMetainfo{}
|
|
err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_metainfo)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
|
|
count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT COUNT(*) FROM bucket_metainfos WHERE bucket_metainfos.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&count)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_progress.node_id, graceful_exit_progress.bytes_transferred, graceful_exit_progress.pieces_transferred, graceful_exit_progress.pieces_failed, graceful_exit_progress.updated_at FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_progress_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
graceful_exit_progress = &GracefulExitProgress{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&graceful_exit_progress.NodeId, &graceful_exit_progress.BytesTransferred, &graceful_exit_progress.PiecesTransferred, &graceful_exit_progress.PiecesFailed, &graceful_exit_progress.UpdatedAt)
|
|
if err != nil {
|
|
return (*GracefulExitProgress)(nil), obj.makeErr(err)
|
|
}
|
|
return graceful_exit_progress, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_transfer_queue.node_id, graceful_exit_transfer_queue.path, graceful_exit_transfer_queue.piece_num, graceful_exit_transfer_queue.root_piece_id, graceful_exit_transfer_queue.durability_ratio, graceful_exit_transfer_queue.queued_at, graceful_exit_transfer_queue.requested_at, graceful_exit_transfer_queue.last_failed_at, graceful_exit_transfer_queue.last_failed_code, graceful_exit_transfer_queue.failed_count, graceful_exit_transfer_queue.finished_at, graceful_exit_transfer_queue.order_limit_send_count FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
graceful_exit_transfer_queue = &GracefulExitTransferQueue{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&graceful_exit_transfer_queue.NodeId, &graceful_exit_transfer_queue.Path, &graceful_exit_transfer_queue.PieceNum, &graceful_exit_transfer_queue.RootPieceId, &graceful_exit_transfer_queue.DurabilityRatio, &graceful_exit_transfer_queue.QueuedAt, &graceful_exit_transfer_queue.RequestedAt, &graceful_exit_transfer_queue.LastFailedAt, &graceful_exit_transfer_queue.LastFailedCode, &graceful_exit_transfer_queue.FailedCount, &graceful_exit_transfer_queue.FinishedAt, &graceful_exit_transfer_queue.OrderLimitSendCount)
|
|
if err != nil {
|
|
return (*GracefulExitTransferQueue)(nil), obj.makeErr(err)
|
|
}
|
|
return graceful_exit_transfer_queue, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.customer_id FROM stripe_customers WHERE stripe_customers.user_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripe_customer_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &CustomerId_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.CustomerId)
|
|
if err != nil {
|
|
return (*CustomerId_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripeCustomer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at FROM stripe_customers WHERE stripe_customers.created_at <= ? ORDER BY stripe_customers.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripe_customer_created_at_less_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StripeCustomer, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
stripe_customer := &StripeCustomer{}
|
|
err = __rows.Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, stripe_customer)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE coinpayments_transactions.user_id = ? ORDER BY coinpayments_transactions.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coinpayments_transaction_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*CoinpaymentsTransaction, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coinpayments_transaction := &CoinpaymentsTransaction{}
|
|
err = __rows.Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coinpayments_transaction)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.project_id = ? AND stripecoinpayments_invoice_project_records.period_start = ? AND stripecoinpayments_invoice_project_records.period_end = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_invoice_project_record_project_id.value(), stripecoinpayments_invoice_project_record_period_start.value(), stripecoinpayments_invoice_project_record_period_end.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return (*StripecoinpaymentsInvoiceProjectRecord)(nil), obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_StripecoinpaymentsInvoiceProjectRecord_By_PeriodStart_And_PeriodEnd_And_State(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.period_start = ? AND stripecoinpayments_invoice_project_records.period_end = ? AND stripecoinpayments_invoice_project_records.state = ? LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_invoice_project_record_period_start.value(), stripecoinpayments_invoice_project_record_period_end.value(), stripecoinpayments_invoice_project_record_state.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
stripecoinpayments_invoice_project_record := &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = __rows.Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, stripecoinpayments_invoice_project_record)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at FROM stripecoinpayments_tx_conversion_rates WHERE stripecoinpayments_tx_conversion_rates.tx_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_tx_conversion_rate_tx_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
|
|
if err != nil {
|
|
return (*StripecoinpaymentsTxConversionRate)(nil), obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_tx_conversion_rate, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
coupon_code *CouponCode, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupon_codes.id, coupon_codes.name, coupon_codes.amount, coupon_codes.description, coupon_codes.type, coupon_codes.billing_periods, coupon_codes.created_at FROM coupon_codes WHERE coupon_codes.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_code_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon_code = &CouponCode{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_code.Id, &coupon_code.Name, &coupon_code.Amount, &coupon_code.Description, &coupon_code.Type, &coupon_code.BillingPeriods, &coupon_code.CreatedAt)
|
|
if err != nil {
|
|
return (*CouponCode)(nil), obj.makeErr(err)
|
|
}
|
|
return coupon_code, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon = &Coupon{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return (*Coupon)(nil), obj.makeErr(err)
|
|
}
|
|
return coupon, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.user_id = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.user_id = ? AND coupons.status = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_user_id.value(), coupon_status.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.status = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_status.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.created_at <= ? AND coupons.status = ? ORDER BY coupons.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_created_at_less_or_equal.value(), coupon_status.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Limited_CouponUsage_By_Period_And_Status_Equal_Number(ctx context.Context,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*CouponUsage, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period FROM coupon_usages WHERE coupon_usages.period = ? AND coupon_usages.status = 0 LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_usage_period.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*CouponUsage, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon_usage := &CouponUsage{}
|
|
err = __rows.Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon_usage)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Has_NodeApiVersion_By_Id_And_ApiVersion_GreaterOrEqual(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version_greater_or_equal NodeApiVersion_ApiVersion_Field) (
|
|
has bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT EXISTS( SELECT 1 FROM node_api_versions WHERE node_api_versions.id = ? AND node_api_versions.api_version >= ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_api_version_id.value(), node_api_version_api_version_greater_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&has)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
return has, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Segmentdetail._set {
|
|
__values = append(__values, update.Segmentdetail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
|
|
}
|
|
|
|
if update.PiecesLostCount._set {
|
|
__values = append(__values, update.PiecesLostCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
|
|
}
|
|
|
|
if update.SegDamagedUnixSec._set {
|
|
__values = append(__values, update.SegDamagedUnixSec.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
|
|
}
|
|
|
|
if update.RepairAttemptCount._set {
|
|
__values = append(__values, update.RepairAttemptCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, irreparabledb_segmentpath.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *pgxImpl) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, accounting_timestamps_name.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *pgxImpl) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Address._set {
|
|
__values = append(__values, update.Address.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
|
|
}
|
|
|
|
if update.LastNet._set {
|
|
__values = append(__values, update.LastNet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
|
|
}
|
|
|
|
if update.LastIpPort._set {
|
|
__values = append(__values, update.LastIpPort.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
|
|
}
|
|
|
|
if update.Protocol._set {
|
|
__values = append(__values, update.Protocol.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
|
|
}
|
|
|
|
if update.Type._set {
|
|
__values = append(__values, update.Type.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.Wallet._set {
|
|
__values = append(__values, update.Wallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
|
|
}
|
|
|
|
if update.WalletFeatures._set {
|
|
__values = append(__values, update.WalletFeatures.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
|
|
}
|
|
|
|
if update.FreeDisk._set {
|
|
__values = append(__values, update.FreeDisk.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
|
|
}
|
|
|
|
if update.PieceCount._set {
|
|
__values = append(__values, update.PieceCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
|
|
}
|
|
|
|
if update.Major._set {
|
|
__values = append(__values, update.Major.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
|
|
}
|
|
|
|
if update.Minor._set {
|
|
__values = append(__values, update.Minor.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
|
|
}
|
|
|
|
if update.Patch._set {
|
|
__values = append(__values, update.Patch.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
|
|
}
|
|
|
|
if update.Hash._set {
|
|
__values = append(__values, update.Hash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
|
|
}
|
|
|
|
if update.Timestamp._set {
|
|
__values = append(__values, update.Timestamp.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
|
|
}
|
|
|
|
if update.Release._set {
|
|
__values = append(__values, update.Release.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
|
|
}
|
|
|
|
if update.Latency90._set {
|
|
__values = append(__values, update.Latency90.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalAuditCount._set {
|
|
__values = append(__values, update.TotalAuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
|
|
}
|
|
|
|
if update.VettedAt._set {
|
|
__values = append(__values, update.VettedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("vetted_at = ?"))
|
|
}
|
|
|
|
if update.LastContactSuccess._set {
|
|
__values = append(__values, update.LastContactSuccess.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
|
|
}
|
|
|
|
if update.LastContactFailure._set {
|
|
__values = append(__values, update.LastContactFailure.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
|
|
}
|
|
|
|
if update.Contained._set {
|
|
__values = append(__values, update.Contained.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
|
|
}
|
|
|
|
if update.Disqualified._set {
|
|
__values = append(__values, update.Disqualified.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
|
|
}
|
|
|
|
if update.Suspended._set {
|
|
__values = append(__values, update.Suspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("suspended = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditSuspended._set {
|
|
__values = append(__values, update.UnknownAuditSuspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_suspended = ?"))
|
|
}
|
|
|
|
if update.OfflineSuspended._set {
|
|
__values = append(__values, update.OfflineSuspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("offline_suspended = ?"))
|
|
}
|
|
|
|
if update.UnderReview._set {
|
|
__values = append(__values, update.UnderReview.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("under_review = ?"))
|
|
}
|
|
|
|
if update.OnlineScore._set {
|
|
__values = append(__values, update.OnlineScore.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("online_score = ?"))
|
|
}
|
|
|
|
if update.AuditReputationAlpha._set {
|
|
__values = append(__values, update.AuditReputationAlpha.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
|
|
}
|
|
|
|
if update.AuditReputationBeta._set {
|
|
__values = append(__values, update.AuditReputationBeta.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditReputationAlpha._set {
|
|
__values = append(__values, update.UnknownAuditReputationAlpha.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_alpha = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditReputationBeta._set {
|
|
__values = append(__values, update.UnknownAuditReputationBeta.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_beta = ?"))
|
|
}
|
|
|
|
if update.ExitInitiatedAt._set {
|
|
__values = append(__values, update.ExitInitiatedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
|
|
}
|
|
|
|
if update.ExitLoopCompletedAt._set {
|
|
__values = append(__values, update.ExitLoopCompletedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
|
|
}
|
|
|
|
if update.ExitFinishedAt._set {
|
|
__values = append(__values, update.ExitFinishedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
|
|
}
|
|
|
|
if update.ExitSuccess._set {
|
|
__values = append(__values, update.ExitSuccess.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
}
|
|
|
|
// UpdateNoReturn_Node_By_Id applies the fields marked as set in update to the
// nodes row with the given id, discarding the result (no RETURNING clause).
//
// updated_at is always stamped with the current time (from obj.db.Hooks.Now),
// so the generated SET clause is never empty and no emptyUpdate() guard is
// needed here. A missing row is not an error: ExecContext simply affects
// zero rows.
func (obj *pgxImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a hole in the statement template, filled in below with the
	// dynamically assembled, comma-joined list of "column = ?" fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values: SET values first, WHERE args last
	var __args []interface{}   // WHERE-clause values

	// For every optional field the caller marked as set, emit a "column = ?"
	// fragment and queue the matching placeholder value. Fragment order and
	// value order must stay in lockstep.
	if update.Address._set {
		__values = append(__values, update.Address.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
	}

	if update.LastNet._set {
		__values = append(__values, update.LastNet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
	}

	if update.LastIpPort._set {
		__values = append(__values, update.LastIpPort.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
	}

	if update.Protocol._set {
		__values = append(__values, update.Protocol.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
	}

	if update.Type._set {
		__values = append(__values, update.Type.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
	}

	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.Wallet._set {
		__values = append(__values, update.Wallet.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
	}

	if update.WalletFeatures._set {
		__values = append(__values, update.WalletFeatures.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
	}

	if update.FreeDisk._set {
		__values = append(__values, update.FreeDisk.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
	}

	if update.PieceCount._set {
		__values = append(__values, update.PieceCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
	}

	if update.Major._set {
		__values = append(__values, update.Major.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
	}

	if update.Minor._set {
		__values = append(__values, update.Minor.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
	}

	if update.Patch._set {
		__values = append(__values, update.Patch.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
	}

	if update.Hash._set {
		__values = append(__values, update.Hash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
	}

	if update.Timestamp._set {
		__values = append(__values, update.Timestamp.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
	}

	if update.Release._set {
		__values = append(__values, update.Release.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
	}

	if update.Latency90._set {
		__values = append(__values, update.Latency90.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
	}

	if update.AuditSuccessCount._set {
		__values = append(__values, update.AuditSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
	}

	if update.TotalAuditCount._set {
		__values = append(__values, update.TotalAuditCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
	}

	if update.VettedAt._set {
		__values = append(__values, update.VettedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("vetted_at = ?"))
	}

	if update.LastContactSuccess._set {
		__values = append(__values, update.LastContactSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
	}

	if update.LastContactFailure._set {
		__values = append(__values, update.LastContactFailure.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
	}

	if update.Contained._set {
		__values = append(__values, update.Contained.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
	}

	if update.Disqualified._set {
		__values = append(__values, update.Disqualified.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
	}

	if update.Suspended._set {
		__values = append(__values, update.Suspended.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("suspended = ?"))
	}

	if update.UnknownAuditSuspended._set {
		__values = append(__values, update.UnknownAuditSuspended.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_suspended = ?"))
	}

	if update.OfflineSuspended._set {
		__values = append(__values, update.OfflineSuspended.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("offline_suspended = ?"))
	}

	if update.UnderReview._set {
		__values = append(__values, update.UnderReview.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("under_review = ?"))
	}

	if update.OnlineScore._set {
		__values = append(__values, update.OnlineScore.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("online_score = ?"))
	}

	if update.AuditReputationAlpha._set {
		__values = append(__values, update.AuditReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
	}

	if update.AuditReputationBeta._set {
		__values = append(__values, update.AuditReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
	}

	if update.UnknownAuditReputationAlpha._set {
		__values = append(__values, update.UnknownAuditReputationAlpha.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_alpha = ?"))
	}

	if update.UnknownAuditReputationBeta._set {
		__values = append(__values, update.UnknownAuditReputationBeta.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_beta = ?"))
	}

	if update.ExitInitiatedAt._set {
		__values = append(__values, update.ExitInitiatedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
	}

	if update.ExitLoopCompletedAt._set {
		__values = append(__values, update.ExitLoopCompletedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
	}

	if update.ExitFinishedAt._set {
		__values = append(__values, update.ExitFinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
	}

	if update.ExitSuccess._set {
		__values = append(__values, update.ExitSuccess.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
	}

	// updated_at is maintained automatically on every update; this also
	// guarantees the SET clause is non-empty.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	// WHERE-clause value comes after all SET values.
	__args = append(__args, node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_AuditHistory_By_NodeId applies the optional fields in update to the
// audit_histories row keyed by node_id and returns the updated row.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_AuditHistory_By_NodeId(ctx context.Context,
	audit_history_node_id AuditHistory_NodeId_Field,
	update AuditHistory_Update_Fields) (
	audit_history *AuditHistory, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE audit_histories SET "), __sets, __sqlbundle_Literal(" WHERE audit_histories.node_id = ? RETURNING audit_histories.node_id, audit_histories.history")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.History._set {
		__values = append(__values, update.History.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("history = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, audit_history_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	audit_history = &AuditHistory{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&audit_history.NodeId, &audit_history.History)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return audit_history, nil
}
|
|
|
|
// Update_User_By_Id applies the optional fields in update to the users row
// keyed by id and returns the updated row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_User_By_Id(ctx context.Context,
	user_id User_Id_Field,
	update User_Update_Fields) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// One guard per updatable column; only _set fields join the SET clause.
	if update.Email._set {
		__values = append(__values, update.Email.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
	}

	if update.NormalizedEmail._set {
		__values = append(__values, update.NormalizedEmail.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("normalized_email = ?"))
	}

	if update.FullName._set {
		__values = append(__values, update.FullName.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("full_name = ?"))
	}

	if update.ShortName._set {
		__values = append(__values, update.ShortName.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("short_name = ?"))
	}

	if update.PasswordHash._set {
		__values = append(__values, update.PasswordHash.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?"))
	}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	if update.ProjectLimit._set {
		__values = append(__values, update.ProjectLimit.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_limit = ?"))
	}

	if update.Position._set {
		__values = append(__values, update.Position.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("position = ?"))
	}

	if update.CompanyName._set {
		__values = append(__values, update.CompanyName.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_name = ?"))
	}

	if update.CompanySize._set {
		__values = append(__values, update.CompanySize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_size = ?"))
	}

	if update.WorkingOn._set {
		__values = append(__values, update.WorkingOn.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("working_on = ?"))
	}

	if update.IsProfessional._set {
		__values = append(__values, update.IsProfessional.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("is_professional = ?"))
	}

	if update.EmployeeCount._set {
		__values = append(__values, update.EmployeeCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("employee_count = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, user_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil
}
|
|
|
|
// Update_Project_By_Id applies the optional fields in update to the projects
// row keyed by id and returns the updated row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field,
	update Project_Update_Fields) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// One guard per updatable column; only _set fields join the SET clause.
	if update.Name._set {
		__values = append(__values, update.Name.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
	}

	if update.Description._set {
		__values = append(__values, update.Description.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
	}

	if update.UsageLimit._set {
		__values = append(__values, update.UsageLimit.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
	}

	if update.BandwidthLimit._set {
		__values = append(__values, update.BandwidthLimit.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("bandwidth_limit = ?"))
	}

	if update.RateLimit._set {
		__values = append(__values, update.RateLimit.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("rate_limit = ?"))
	}

	if update.MaxBuckets._set {
		__values = append(__values, update.MaxBuckets.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("max_buckets = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, project_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil
}
|
|
|
|
// UpdateNoReturn_ApiKey_By_Id applies the optional fields in update to the
// api_keys row keyed by id without reading the row back.
//
// Returns an emptyUpdate error when no field in update is set. A non-matching
// id is not reported as an error. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	update ApiKey_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.Name._set {
		__values = append(__values, update.Name.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, api_key_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// UpdateNoReturn_PeerIdentity_By_NodeId applies the optional fields in update
// to the peer_identities row keyed by node_id without reading the row back.
//
// updated_at is always refreshed from the db clock hook, so the statement is
// never empty and there is no emptyUpdate guard here.
// (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.LeafSerialNumber._set {
		__values = append(__values, update.LeafSerialNumber.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?"))
	}

	if update.Chain._set {
		__values = append(__values, update.Chain.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?"))
	}

	// updated_at is maintained automatically via the Hooks.Now clock (UTC).
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, peer_identity_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_RegistrationToken_By_Secret applies the optional fields in update to
// the registration_tokens row keyed by secret and returns the updated row.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	update RegistrationToken_Update_Fields) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ? RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.OwnerId._set {
		__values = append(__values, update.OwnerId.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, registration_token_secret.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil
}
|
|
|
|
// Update_BucketMetainfo_By_ProjectId_And_Name applies the optional fields in
// update to the bucket_metainfos row keyed by (project_id, name) and returns
// the updated row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field,
	update BucketMetainfo_Update_Fields) (
	bucket_metainfo *BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// One guard per updatable column; only _set fields join the SET clause.
	if update.PartnerId._set {
		__values = append(__values, update.PartnerId.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?"))
	}

	if update.DefaultSegmentSize._set {
		__values = append(__values, update.DefaultSegmentSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?"))
	}

	if update.DefaultEncryptionCipherSuite._set {
		__values = append(__values, update.DefaultEncryptionCipherSuite.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?"))
	}

	if update.DefaultEncryptionBlockSize._set {
		__values = append(__values, update.DefaultEncryptionBlockSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?"))
	}

	if update.DefaultRedundancyAlgorithm._set {
		__values = append(__values, update.DefaultRedundancyAlgorithm.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?"))
	}

	if update.DefaultRedundancyShareSize._set {
		__values = append(__values, update.DefaultRedundancyShareSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?"))
	}

	if update.DefaultRedundancyRequiredShares._set {
		__values = append(__values, update.DefaultRedundancyRequiredShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?"))
	}

	if update.DefaultRedundancyRepairShares._set {
		__values = append(__values, update.DefaultRedundancyRepairShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?"))
	}

	if update.DefaultRedundancyOptimalShares._set {
		__values = append(__values, update.DefaultRedundancyOptimalShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?"))
	}

	if update.DefaultRedundancyTotalShares._set {
		__values = append(__values, update.DefaultRedundancyTotalShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_metainfo = &BucketMetainfo{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_metainfo, nil
}
|
|
|
|
// UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum
// applies the optional fields in update to the graceful_exit_transfer_queue
// row keyed by (node_id, path, piece_num) without reading the row back.
//
// Returns an emptyUpdate error when no field in update is set. A non-matching
// key is not reported as an error. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
	update GracefulExitTransferQueue_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_transfer_queue SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// One guard per updatable column; only _set fields join the SET clause.
	if update.DurabilityRatio._set {
		__values = append(__values, update.DurabilityRatio.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("durability_ratio = ?"))
	}

	if update.RequestedAt._set {
		__values = append(__values, update.RequestedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("requested_at = ?"))
	}

	if update.LastFailedAt._set {
		__values = append(__values, update.LastFailedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_at = ?"))
	}

	if update.LastFailedCode._set {
		__values = append(__values, update.LastFailedCode.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_code = ?"))
	}

	if update.FailedCount._set {
		__values = append(__values, update.FailedCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("failed_count = ?"))
	}

	if update.FinishedAt._set {
		__values = append(__values, update.FinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("finished_at = ?"))
	}

	if update.OrderLimitSendCount._set {
		__values = append(__values, update.OrderLimitSendCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("order_limit_send_count = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_CoinpaymentsTransaction_By_Id applies the optional fields in update
// to the coinpayments_transactions row keyed by id and returns the updated
// row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
	coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
	update CoinpaymentsTransaction_Update_Fields) (
	coinpayments_transaction *CoinpaymentsTransaction, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coinpayments_transactions SET "), __sets, __sqlbundle_Literal(" WHERE coinpayments_transactions.id = ? RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.Received._set {
		__values = append(__values, update.Received.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("received = ?"))
	}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, coinpayments_transaction_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coinpayments_transaction = &CoinpaymentsTransaction{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coinpayments_transaction, nil
}
|
|
|
|
// Update_StripecoinpaymentsInvoiceProjectRecord_By_Id applies the optional
// fields in update to the stripecoinpayments_invoice_project_records row
// keyed by id and returns the updated row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
	update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
	stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE stripecoinpayments_invoice_project_records SET "), __sets, __sqlbundle_Literal(" WHERE stripecoinpayments_invoice_project_records.id = ? RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.State._set {
		__values = append(__values, update.State.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("state = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, stripecoinpayments_invoice_project_record_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_invoice_project_record, nil
}
|
|
|
|
// Update_Coupon_By_Id applies the optional fields in update to the coupons
// row keyed by id and returns the updated row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field,
	update Coupon_Update_Fields) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupons SET "), __sets, __sqlbundle_Literal(" WHERE coupons.id = ? RETURNING coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, coupon_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon = &Coupon{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon, nil
}
|
|
|
|
// Update_CouponUsage_By_CouponId_And_Period applies the optional fields in
// update to the coupon_usages row keyed by (coupon_id, period) and returns
// the updated row via RETURNING.
//
// Returns (nil, nil) when no row matches, and an emptyUpdate error when no
// field in update is set. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_period CouponUsage_Period_Field,
	update CouponUsage_Update_Fields) (
	coupon_usage *CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupon_usages SET "), __sets, __sqlbundle_Literal(" WHERE coupon_usages.coupon_id = ? AND coupon_usages.period = ? RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, coupon_usage_coupon_id.value(), coupon_usage_period.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon_usage = &CouponUsage{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
	if err == sql.ErrNoRows {
		// A missing row is not an error for this generated API.
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_usage, nil
}
|
|
|
|
// UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less applies the
// optional fields in update to the node_api_versions row keyed by id, but
// only when the stored api_version is strictly less than the supplied bound
// (a conditional, monotonic bump).
//
// updated_at is always refreshed from the db clock hook, so the statement is
// never empty and there is no emptyUpdate guard here.
// (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less(ctx context.Context,
	node_api_version_id NodeApiVersion_Id_Field,
	node_api_version_api_version_less NodeApiVersion_ApiVersion_Field,
	update NodeApiVersion_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// __sets is a placeholder filled in below with the dynamic SET clause.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE node_api_versions SET "), __sets, __sqlbundle_Literal(" WHERE node_api_versions.id = ? AND node_api_versions.api_version < ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// Only fields explicitly marked _set become part of the SET clause.
	if update.ApiVersion._set {
		__values = append(__values, update.ApiVersion.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("api_version = ?"))
	}

	// updated_at is maintained automatically via the Hooks.Now clock (UTC).
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	// WHERE-clause arguments follow the SET-clause values in placeholder order.
	__args = append(__args, node_api_version_id.value(), node_api_version_api_version_less.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Delete_PendingAudits_By_NodeId deletes the pending_audits row keyed by
// node_id. It reports whether at least one row was removed.
// (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Delete_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?")

	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no such row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Irreparabledb_By_Segmentpath deletes the irreparabledbs row keyed by
// segmentpath. It reports whether at least one row was removed.
// (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no such row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Injuredsegment_By_UpdatedAt_Less bulk-deletes injuredsegments rows
// whose updated_at is strictly before the given cutoff, returning how many
// rows were removed. (Generated by dbx — do not edit by hand.)
func (obj *pgxImpl) Delete_Injuredsegment_By_UpdatedAt_Less(ctx context.Context,
	injuredsegment_updated_at_less Injuredsegment_UpdatedAt_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM injuredsegments WHERE injuredsegments.updated_at < ?")

	var __values []interface{}
	__values = append(__values, injuredsegment_updated_at_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	// Unlike the single-row deletes, this variant reports the exact count.
	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
func (obj *pgxImpl) Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? AND project_members.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value(), project_member_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, reset_password_token_secret.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_transfer_queue_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.finished_at is not NULL")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_transfer_queue_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM coupon_codes WHERE coupon_codes.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_code_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxImpl) Delete_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
deleted bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM coupons WHERE coupons.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (impl pgxImpl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(*pgconn.PgError); ok {
|
|
if e.Code[:2] == "23" {
|
|
return e.ConstraintName, true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *pgxImpl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM user_credits;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripecoinpayments_apply_balance_intents;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM project_members;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_metainfos;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM api_keys;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM value_attributions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM users;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripecoinpayments_tx_conversion_rates;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripecoinpayments_invoice_project_records;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripe_customers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_storage_tallies;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_paystubs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_payments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_bandwidth_rollups_phase2;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_bandwidth_rollup_archives;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM revocations;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM reset_password_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM registration_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM project_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM projects;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM pending_audits;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM peer_identities;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM offers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM node_api_versions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM injuredsegments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM graceful_exit_transfer_queue;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM graceful_exit_progress;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coupon_usages;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coupon_codes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coupons;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coinpayments_transactions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_storage_tallies;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_bandwidth_rollup_archives;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM audit_histories;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM accounting_timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM accounting_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_ValueAttribution(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field,
|
|
value_attribution_partner_id ValueAttribution_PartnerId_Field) (
|
|
value_attribution *ValueAttribution, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__project_id_val := value_attribution_project_id.value()
|
|
__bucket_name_val := value_attribution_bucket_name.value()
|
|
__partner_id_val := value_attribution_partner_id.value()
|
|
__last_updated_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO value_attributions ( project_id, bucket_name, partner_id, last_updated ) VALUES ( ?, ?, ?, ? ) RETURNING value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __project_id_val, __bucket_name_val, __partner_id_val, __last_updated_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
value_attribution = &ValueAttribution{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return value_attribution, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) CreateNoReturn_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__segmentpath_val := irreparabledb_segmentpath.value()
|
|
__segmentdetail_val := irreparabledb_segmentdetail.value()
|
|
__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
|
|
__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
|
|
__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) CreateNoReturn_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__name_val := accounting_timestamps_name.value()
|
|
__value_val := accounting_timestamps_value.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __name_val, __value_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_AuditHistory(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field,
|
|
audit_history_history AuditHistory_History_Field) (
|
|
audit_history *AuditHistory, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__node_id_val := audit_history_node_id.value()
|
|
__history_val := audit_history_history.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO audit_histories ( node_id, history ) VALUES ( ?, ? ) RETURNING audit_histories.node_id, audit_histories.history")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __node_id_val, __history_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
audit_history = &AuditHistory{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&audit_history.NodeId, &audit_history.History)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return audit_history, nil
|
|
|
|
}
|
|
|
|
// Create_User inserts a users row and returns the stored row, including
// database-populated columns (project_limit, is_professional when not
// explicitly supplied). Required fields are passed positionally; everything
// else comes through the optional User_Create_Fields struct.
//
// The column/placeholder lists are built with __sqlbundle_Hole so that
// optional columns (project_limit, is_professional) are only included in the
// INSERT when their _set flag indicates the caller provided a value;
// otherwise the database defaults apply.
func (obj *pgxcockroachImpl) Create_User(ctx context.Context,
	user_id User_Id_Field,
	user_email User_Email_Field,
	user_normalized_email User_NormalizedEmail_Field,
	user_full_name User_FullName_Field,
	user_password_hash User_PasswordHash_Field,
	optional User_Create_Fields) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	// created_at is stamped from the db clock hook (UTC).
	__now := obj.db.Hooks.Now().UTC()
	__id_val := user_id.value()
	__email_val := user_email.value()
	__normalized_email_val := user_normalized_email.value()
	__full_name_val := user_full_name.value()
	__short_name_val := optional.ShortName.value()
	__password_hash_val := user_password_hash.value()
	// New users always start with status 0.
	__status_val := int(0)
	__partner_id_val := optional.PartnerId.value()
	__created_at_val := __now
	__position_val := optional.Position.value()
	__company_name_val := optional.CompanyName.value()
	__company_size_val := optional.CompanySize.value()
	__working_on_val := optional.WorkingOn.value()
	__employee_count_val := optional.EmployeeCount.value()

	// Holes are mutated below to splice optional columns into the clause.
	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at, position, company_name, company_size, working_on, employee_count")}
	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

	// RETURNING lists every column so the caller sees db defaults too.
	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count")}}

	var __values []interface{}
	__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val, __employee_count_val)

	__optional_columns := __sqlbundle_Literals{Join: ", "}
	__optional_placeholders := __sqlbundle_Literals{Join: ", "}

	// Include project_limit only when the caller explicitly set it.
	if optional.ProjectLimit._set {
		__values = append(__values, optional.ProjectLimit.value())
		__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("project_limit"))
		__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
	}

	// Include is_professional only when the caller explicitly set it.
	if optional.IsProfessional._set {
		__values = append(__values, optional.IsProfessional.value())
		__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("is_professional"))
		__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
	}

	if len(__optional_columns.SQLs) == 0 {
		// NOTE(review): for this table __columns.SQL is always non-nil
		// (initialized above), so this generator-emitted branch appears
		// unreachable here; it handles the all-optional-columns case in
		// other generated tables.
		if __columns.SQL == nil {
			__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
		}
	} else {
		__columns.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__columns.SQL, __optional_columns}}
		__placeholders.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__placeholders.SQL, __optional_placeholders}}
	}
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field,
|
|
project_owner_id Project_OwnerId_Field,
|
|
optional Project_Create_Fields) (
|
|
project *Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := project_id.value()
|
|
__name_val := project_name.value()
|
|
__description_val := project_description.value()
|
|
__usage_limit_val := optional.UsageLimit.value()
|
|
__bandwidth_limit_val := optional.BandwidthLimit.value()
|
|
__rate_limit_val := optional.RateLimit.value()
|
|
__max_buckets_val := optional.MaxBuckets.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__owner_id_val := project_owner_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, usage_limit, bandwidth_limit, rate_limit, max_buckets, partner_id, owner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __name_val, __description_val, __usage_limit_val, __bandwidth_limit_val, __rate_limit_val, __max_buckets_val, __partner_id_val, __owner_id_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__member_id_val := project_member_member_id.value()
|
|
__project_id_val := project_member_project_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? ) RETURNING project_members.member_id, project_members.project_id, project_members.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __member_id_val, __project_id_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project_member = &ProjectMember{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project_member, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_head ApiKey_Head_Field,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_secret ApiKey_Secret_Field,
|
|
optional ApiKey_Create_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := api_key_id.value()
|
|
__project_id_val := api_key_project_id.value()
|
|
__head_val := api_key_head.value()
|
|
__name_val := api_key_name.value()
|
|
__secret_val := api_key_secret.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, partner_id, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __partner_id_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) CreateNoReturn_Revocation(ctx context.Context,
|
|
revocation_revoked Revocation_Revoked_Field,
|
|
revocation_api_key_id Revocation_ApiKeyId_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__revoked_val := revocation_revoked.value()
|
|
__api_key_id_val := revocation_api_key_id.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO revocations ( revoked, api_key_id ) VALUES ( ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __revoked_val, __api_key_id_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
__bucket_name_val := bucket_storage_tally_bucket_name.value()
|
|
__project_id_val := bucket_storage_tally_project_id.value()
|
|
__interval_start_val := bucket_storage_tally_interval_start.value()
|
|
__inline_val := bucket_storage_tally_inline.value()
|
|
__remote_val := bucket_storage_tally_remote.value()
|
|
__remote_segments_count_val := bucket_storage_tally_remote_segments_count.value()
|
|
__inline_segments_count_val := bucket_storage_tally_inline_segments_count.value()
|
|
__object_count_val := bucket_storage_tally_object_count.value()
|
|
__metadata_size_val := bucket_storage_tally_metadata_size.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_storage_tallies ( bucket_name, project_id, interval_start, inline, remote, remote_segments_count, inline_segments_count, object_count, metadata_size ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __bucket_name_val, __project_id_val, __interval_start_val, __inline_val, __remote_val, __remote_segments_count_val, __inline_segments_count_val, __object_count_val, __metadata_size_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
// Create_StoragenodeBandwidthRollup inserts a storagenode bandwidth rollup
// row and returns it (RETURNING). The "allocated" column is optional: it is
// only added to the column/placeholder lists — and to the bound values —
// when optional.Allocated was explicitly set.
func (obj *pgxcockroachImpl) Create_StoragenodeBandwidthRollup(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
	storagenode_bandwidth_rollup_interval_seconds StoragenodeBandwidthRollup_IntervalSeconds_Field,
	storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field,
	storagenode_bandwidth_rollup_settled StoragenodeBandwidthRollup_Settled_Field,
	optional StoragenodeBandwidthRollup_Create_Fields) (
	storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) {
	defer mon.Task()(&ctx)(&err)
	__storagenode_id_val := storagenode_bandwidth_rollup_storagenode_id.value()
	__interval_start_val := storagenode_bandwidth_rollup_interval_start.value()
	__interval_seconds_val := storagenode_bandwidth_rollup_interval_seconds.value()
	__action_val := storagenode_bandwidth_rollup_action.value()
	__settled_val := storagenode_bandwidth_rollup_settled.value()

	// __clause embeds the *same* __columns and __placeholders holes by
	// pointer, so mutating those holes below (before rendering) rewrites
	// the "( cols ) VALUES ( ?s )" clause inside __embed_stmt.
	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("storagenode_id, interval_start, interval_seconds, action, settled")}

	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?")}

	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO storagenode_bandwidth_rollups "), __clause, __sqlbundle_Literal(" RETURNING storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled")}}

	var __values []interface{}

	__values = append(__values, __storagenode_id_val, __interval_start_val, __interval_seconds_val, __action_val, __settled_val)

	// Accumulate any optional columns/placeholders that were explicitly set.
	__optional_columns := __sqlbundle_Literals{Join: ", "}

	__optional_placeholders := __sqlbundle_Literals{Join: ", "}

	if optional.Allocated._set {

		__values = append(__values, optional.Allocated.value())

		__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("allocated"))

		__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))

	}

	if len(__optional_columns.SQLs) == 0 {

		// Generic generated fallback: with no columns at all the clause
		// would become "DEFAULT VALUES". NOTE(review): __columns.SQL is
		// initialized above, so this branch appears unreachable here —
		// it exists for tables whose every column is optional.
		if __columns.SQL == nil {

			__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")

		}

	} else {

		// Append the optional pieces to the required ones; the render
		// below picks these up through the __clause hole.
		__columns.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__columns.SQL, __optional_columns}}

		__placeholders.SQL = __sqlbundle_Literals{Join: ", ", SQLs: []__sqlbundle_SQL{__placeholders.SQL, __optional_placeholders}}

	}

	// Render only after the holes are final.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)

	obj.logStmt(__stmt, __values...)

	storagenode_bandwidth_rollup = &StoragenodeBandwidthRollup{}

	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)

	if err != nil {

		return nil, obj.makeErr(err)

	}

	return storagenode_bandwidth_rollup, nil

}
|
|
|
|
func (obj *pgxcockroachImpl) ReplaceNoReturn_StoragenodePaystub(ctx context.Context,
|
|
storagenode_paystub_period StoragenodePaystub_Period_Field,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
|
|
storagenode_paystub_codes StoragenodePaystub_Codes_Field,
|
|
storagenode_paystub_usage_at_rest StoragenodePaystub_UsageAtRest_Field,
|
|
storagenode_paystub_usage_get StoragenodePaystub_UsageGet_Field,
|
|
storagenode_paystub_usage_put StoragenodePaystub_UsagePut_Field,
|
|
storagenode_paystub_usage_get_repair StoragenodePaystub_UsageGetRepair_Field,
|
|
storagenode_paystub_usage_put_repair StoragenodePaystub_UsagePutRepair_Field,
|
|
storagenode_paystub_usage_get_audit StoragenodePaystub_UsageGetAudit_Field,
|
|
storagenode_paystub_comp_at_rest StoragenodePaystub_CompAtRest_Field,
|
|
storagenode_paystub_comp_get StoragenodePaystub_CompGet_Field,
|
|
storagenode_paystub_comp_put StoragenodePaystub_CompPut_Field,
|
|
storagenode_paystub_comp_get_repair StoragenodePaystub_CompGetRepair_Field,
|
|
storagenode_paystub_comp_put_repair StoragenodePaystub_CompPutRepair_Field,
|
|
storagenode_paystub_comp_get_audit StoragenodePaystub_CompGetAudit_Field,
|
|
storagenode_paystub_surge_percent StoragenodePaystub_SurgePercent_Field,
|
|
storagenode_paystub_held StoragenodePaystub_Held_Field,
|
|
storagenode_paystub_owed StoragenodePaystub_Owed_Field,
|
|
storagenode_paystub_disposed StoragenodePaystub_Disposed_Field,
|
|
storagenode_paystub_paid StoragenodePaystub_Paid_Field,
|
|
storagenode_paystub_distributed StoragenodePaystub_Distributed_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__period_val := storagenode_paystub_period.value()
|
|
__node_id_val := storagenode_paystub_node_id.value()
|
|
__created_at_val := __now
|
|
__codes_val := storagenode_paystub_codes.value()
|
|
__usage_at_rest_val := storagenode_paystub_usage_at_rest.value()
|
|
__usage_get_val := storagenode_paystub_usage_get.value()
|
|
__usage_put_val := storagenode_paystub_usage_put.value()
|
|
__usage_get_repair_val := storagenode_paystub_usage_get_repair.value()
|
|
__usage_put_repair_val := storagenode_paystub_usage_put_repair.value()
|
|
__usage_get_audit_val := storagenode_paystub_usage_get_audit.value()
|
|
__comp_at_rest_val := storagenode_paystub_comp_at_rest.value()
|
|
__comp_get_val := storagenode_paystub_comp_get.value()
|
|
__comp_put_val := storagenode_paystub_comp_put.value()
|
|
__comp_get_repair_val := storagenode_paystub_comp_get_repair.value()
|
|
__comp_put_repair_val := storagenode_paystub_comp_put_repair.value()
|
|
__comp_get_audit_val := storagenode_paystub_comp_get_audit.value()
|
|
__surge_percent_val := storagenode_paystub_surge_percent.value()
|
|
__held_val := storagenode_paystub_held.value()
|
|
__owed_val := storagenode_paystub_owed.value()
|
|
__disposed_val := storagenode_paystub_disposed.value()
|
|
__paid_val := storagenode_paystub_paid.value()
|
|
__distributed_val := storagenode_paystub_distributed.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("UPSERT INTO storagenode_paystubs ( period, node_id, created_at, codes, usage_at_rest, usage_get, usage_put, usage_get_repair, usage_put_repair, usage_get_audit, comp_at_rest, comp_get, comp_put, comp_get_repair, comp_put_repair, comp_get_audit, surge_percent, held, owed, disposed, paid, distributed ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __period_val, __node_id_val, __created_at_val, __codes_val, __usage_at_rest_val, __usage_get_val, __usage_put_val, __usage_get_repair_val, __usage_put_repair_val, __usage_get_audit_val, __comp_at_rest_val, __comp_get_val, __comp_put_val, __comp_get_repair_val, __comp_put_repair_val, __comp_get_audit_val, __surge_percent_val, __held_val, __owed_val, __disposed_val, __paid_val, __distributed_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) CreateNoReturn_StoragenodePayment(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field,
|
|
storagenode_payment_amount StoragenodePayment_Amount_Field,
|
|
optional StoragenodePayment_Create_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__created_at_val := __now
|
|
__node_id_val := storagenode_payment_node_id.value()
|
|
__period_val := storagenode_payment_period.value()
|
|
__amount_val := storagenode_payment_amount.value()
|
|
__receipt_val := optional.Receipt.value()
|
|
__notes_val := optional.Notes.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO storagenode_payments ( created_at, node_id, period, amount, receipt, notes ) VALUES ( ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __created_at_val, __node_id_val, __period_val, __amount_val, __receipt_val, __notes_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := peer_identity_node_id.value()
|
|
__leaf_serial_number_val := peer_identity_leaf_serial_number.value()
|
|
__chain_val := peer_identity_chain.value()
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO peer_identities ( node_id, leaf_serial_number, chain, updated_at ) VALUES ( ?, ?, ?, ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __node_id_val, __leaf_serial_number_val, __chain_val, __updated_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__secret_val := registration_token_secret.value()
|
|
__owner_id_val := optional.OwnerId.value()
|
|
__project_limit_val := registration_token_project_limit.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__secret_val := reset_password_token_secret.value()
|
|
__owner_id_val := reset_password_token_owner_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO reset_password_tokens ( secret, owner_id, created_at ) VALUES ( ?, ?, ? ) RETURNING reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __secret_val, __owner_id_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
reset_password_token = &ResetPasswordToken{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return reset_password_token, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := bucket_metainfo_id.value()
|
|
__project_id_val := bucket_metainfo_project_id.value()
|
|
__name_val := bucket_metainfo_name.value()
|
|
__partner_id_val := optional.PartnerId.value()
|
|
__path_cipher_val := bucket_metainfo_path_cipher.value()
|
|
__created_at_val := __now
|
|
__default_segment_size_val := bucket_metainfo_default_segment_size.value()
|
|
__default_encryption_cipher_suite_val := bucket_metainfo_default_encryption_cipher_suite.value()
|
|
__default_encryption_block_size_val := bucket_metainfo_default_encryption_block_size.value()
|
|
__default_redundancy_algorithm_val := bucket_metainfo_default_redundancy_algorithm.value()
|
|
__default_redundancy_share_size_val := bucket_metainfo_default_redundancy_share_size.value()
|
|
__default_redundancy_required_shares_val := bucket_metainfo_default_redundancy_required_shares.value()
|
|
__default_redundancy_repair_shares_val := bucket_metainfo_default_redundancy_repair_shares.value()
|
|
__default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value()
|
|
__default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_metainfos ( id, project_id, name, partner_id, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __project_id_val, __name_val, __partner_id_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
bucket_metainfo = &BucketMetainfo{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bucket_metainfo, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__user_id_val := stripe_customer_user_id.value()
|
|
__customer_id_val := stripe_customer_customer_id.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripe_customers ( user_id, customer_id, created_at ) VALUES ( ?, ?, ? ) RETURNING stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __user_id_val, __customer_id_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripe_customer = &StripeCustomer{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripe_customer, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := coinpayments_transaction_id.value()
|
|
__user_id_val := coinpayments_transaction_user_id.value()
|
|
__address_val := coinpayments_transaction_address.value()
|
|
__amount_val := coinpayments_transaction_amount.value()
|
|
__received_val := coinpayments_transaction_received.value()
|
|
__status_val := coinpayments_transaction_status.value()
|
|
__key_val := coinpayments_transaction_key.value()
|
|
__timeout_val := coinpayments_transaction_timeout.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO coinpayments_transactions ( id, user_id, address, amount, received, status, key, timeout, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __user_id_val, __address_val, __amount_val, __received_val, __status_val, __key_val, __timeout_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coinpayments_transaction = &CoinpaymentsTransaction{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return coinpayments_transaction, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := stripecoinpayments_invoice_project_record_id.value()
|
|
__project_id_val := stripecoinpayments_invoice_project_record_project_id.value()
|
|
__storage_val := stripecoinpayments_invoice_project_record_storage.value()
|
|
__egress_val := stripecoinpayments_invoice_project_record_egress.value()
|
|
__objects_val := stripecoinpayments_invoice_project_record_objects.value()
|
|
__period_start_val := stripecoinpayments_invoice_project_record_period_start.value()
|
|
__period_end_val := stripecoinpayments_invoice_project_record_period_end.value()
|
|
__state_val := stripecoinpayments_invoice_project_record_state.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_invoice_project_records ( id, project_id, storage, egress, objects, period_start, period_end, state, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, __id_val, __project_id_val, __storage_val, __egress_val, __objects_val, __period_start_val, __period_end_val, __state_val, __created_at_val)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
|
|
}
|
|
|
|
// Create_StripecoinpaymentsTxConversionRate inserts one row into
// stripecoinpayments_tx_conversion_rates and returns the stored row as
// reported by the database (via RETURNING).
// created_at is taken from the db clock hook (UTC), not from the caller.
func (obj *pgxcockroachImpl) Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
	stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
	stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
	stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__tx_id_val := stripecoinpayments_tx_conversion_rate_tx_id.value()
	__rate_val := stripecoinpayments_tx_conversion_rate_rate.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO stripecoinpayments_tx_conversion_rates ( tx_id, rate, created_at ) VALUES ( ?, ?, ? ) RETURNING stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at")

	var __values []interface{}
	__values = append(__values, __tx_id_val, __rate_val, __created_at_val)

	// Render rewrites the ?-placeholders for the active dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_tx_conversion_rate, nil

}
|
|
|
|
// Create_CouponCode inserts one row into coupon_codes and returns the stored
// row (via RETURNING). The optional struct supplies nullable/defaulted
// columns; here only billing_periods. created_at comes from the db clock
// hook (UTC), not from the caller.
func (obj *pgxcockroachImpl) Create_CouponCode(ctx context.Context,
	coupon_code_id CouponCode_Id_Field,
	coupon_code_name CouponCode_Name_Field,
	coupon_code_amount CouponCode_Amount_Field,
	coupon_code_description CouponCode_Description_Field,
	coupon_code_type CouponCode_Type_Field,
	optional CouponCode_Create_Fields) (
	coupon_code *CouponCode, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__id_val := coupon_code_id.value()
	__name_val := coupon_code_name.value()
	__amount_val := coupon_code_amount.value()
	__description_val := coupon_code_description.value()
	__type_val := coupon_code_type.value()
	__billing_periods_val := optional.BillingPeriods.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupon_codes ( id, name, amount, description, type, billing_periods, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING coupon_codes.id, coupon_codes.name, coupon_codes.amount, coupon_codes.description, coupon_codes.type, coupon_codes.billing_periods, coupon_codes.created_at")

	var __values []interface{}
	__values = append(__values, __id_val, __name_val, __amount_val, __description_val, __type_val, __billing_periods_val, __created_at_val)

	// Render rewrites the ?-placeholders for the active dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon_code = &CouponCode{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_code.Id, &coupon_code.Name, &coupon_code.Amount, &coupon_code.Description, &coupon_code.Type, &coupon_code.BillingPeriods, &coupon_code.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_code, nil

}
|
|
|
|
// Create_Coupon inserts one row into coupons and returns the stored row
// (via RETURNING). The optional struct supplies the nullable columns
// billing_periods and coupon_code_name. created_at comes from the db clock
// hook (UTC), not from the caller.
func (obj *pgxcockroachImpl) Create_Coupon(ctx context.Context,
	coupon_id Coupon_Id_Field,
	coupon_user_id Coupon_UserId_Field,
	coupon_amount Coupon_Amount_Field,
	coupon_description Coupon_Description_Field,
	coupon_type Coupon_Type_Field,
	coupon_status Coupon_Status_Field,
	coupon_duration Coupon_Duration_Field,
	optional Coupon_Create_Fields) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__id_val := coupon_id.value()
	__user_id_val := coupon_user_id.value()
	__amount_val := coupon_amount.value()
	__description_val := coupon_description.value()
	__type_val := coupon_type.value()
	__status_val := coupon_status.value()
	__duration_val := coupon_duration.value()
	__billing_periods_val := optional.BillingPeriods.value()
	__coupon_code_name_val := optional.CouponCodeName.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupons ( id, user_id, amount, description, type, status, duration, billing_periods, coupon_code_name, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at")

	var __values []interface{}
	__values = append(__values, __id_val, __user_id_val, __amount_val, __description_val, __type_val, __status_val, __duration_val, __billing_periods_val, __coupon_code_name_val, __created_at_val)

	// Render rewrites the ?-placeholders for the active dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon = &Coupon{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon, nil

}
|
|
|
|
// Create_CouponUsage inserts one row into coupon_usages and returns the
// stored row (via RETURNING). Unlike the other Create_* methods this table
// has no created_at column, so no db clock hook is consulted.
func (obj *pgxcockroachImpl) Create_CouponUsage(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_amount CouponUsage_Amount_Field,
	coupon_usage_status CouponUsage_Status_Field,
	coupon_usage_period CouponUsage_Period_Field) (
	coupon_usage *CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	__coupon_id_val := coupon_usage_coupon_id.value()
	__amount_val := coupon_usage_amount.value()
	__status_val := coupon_usage_status.value()
	__period_val := coupon_usage_period.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO coupon_usages ( coupon_id, amount, status, period ) VALUES ( ?, ?, ?, ? ) RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")

	var __values []interface{}
	__values = append(__values, __coupon_id_val, __amount_val, __status_val, __period_val)

	// Render rewrites the ?-placeholders for the active dialect.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon_usage = &CouponUsage{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_usage, nil

}
|
|
|
|
// ReplaceNoReturn_NodeApiVersion inserts or overwrites the node_api_versions
// row for the given id using CockroachDB's UPSERT statement; nothing is read
// back. Both created_at and updated_at are set to the db clock hook's current
// UTC time — note that an upsert over an existing row therefore resets
// created_at as well.
func (obj *pgxcockroachImpl) ReplaceNoReturn_NodeApiVersion(ctx context.Context,
	node_api_version_id NodeApiVersion_Id_Field,
	node_api_version_api_version NodeApiVersion_ApiVersion_Field) (
	err error) {
	defer mon.Task()(&ctx)(&err)

	__now := obj.db.Hooks.Now().UTC()
	__id_val := node_api_version_id.value()
	__api_version_val := node_api_version_api_version.value()
	__created_at_val := __now
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("UPSERT INTO node_api_versions ( id, api_version, created_at, updated_at ) VALUES ( ?, ?, ?, ? )")

	var __values []interface{}
	__values = append(__values, __id_val, __api_version_val, __created_at_val, __updated_at_val)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	// No RETURNING clause: plain exec, result row count ignored.
	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil

}
|
|
|
|
// Get_ValueAttribution_By_ProjectId_And_BucketName fetches the single
// value_attributions row keyed by (project_id, bucket_name). A missing row
// surfaces as a wrapped sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
	value_attribution_project_id ValueAttribution_ProjectId_Field,
	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
	value_attribution *ValueAttribution, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT value_attributions.project_id, value_attributions.bucket_name, value_attributions.partner_id, value_attributions.last_updated FROM value_attributions WHERE value_attributions.project_id = ? AND value_attributions.bucket_name = ?")

	var __values []interface{}
	__values = append(__values, value_attribution_project_id.value(), value_attribution_bucket_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	value_attribution = &ValueAttribution{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&value_attribution.ProjectId, &value_attribution.BucketName, &value_attribution.PartnerId, &value_attribution.LastUpdated)
	if err != nil {
		return (*ValueAttribution)(nil), obj.makeErr(err)
	}
	return value_attribution, nil

}
|
|
|
|
// Get_PendingAudits_By_NodeId fetches the single pending_audits row keyed by
// node_id. A missing row surfaces as a wrapped sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	pending_audits *PendingAudits, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count, pending_audits.path FROM pending_audits WHERE pending_audits.node_id = ?")

	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	pending_audits = &PendingAudits{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount, &pending_audits.Path)
	if err != nil {
		return (*PendingAudits)(nil), obj.makeErr(err)
	}
	return pending_audits, nil

}
|
|
|
|
// Get_Irreparabledb_By_Segmentpath fetches the single irreparabledbs row
// keyed by segmentpath. A missing row surfaces as a wrapped sql.ErrNoRows
// from makeErr.
func (obj *pgxcockroachImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	irreparabledb = &Irreparabledb{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return (*Irreparabledb)(nil), obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath lists
// irreparabledbs rows with segmentpath strictly greater than the given value,
// ordered ascending by segmentpath, applying LIMIT/OFFSET — the keyset-style
// pagination shape used by callers that walk the table.
//
// The outer for-loop re-runs the whole query when obj.shouldRetry reports the
// error as retryable; the inner closure exists so each attempt's rows are
// closed by defer before the next attempt starts.
func (obj *pgxcockroachImpl) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
	limit int, offset int64) (
	rows []*Irreparabledb, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath > ? ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath_greater.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*Irreparabledb, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				irreparabledb := &Irreparabledb{}
				err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
				if err != nil {
					return nil, err
				}
				rows = append(rows, irreparabledb)
			}
			err = __rows.Err()
			if err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// Find_AccountingTimestamps_Value_By_Name returns the value column of the
// accounting_timestamps row with the given name. Unlike the Get_* methods,
// Find treats a missing row as a non-error: it returns (nil, nil) on
// sql.ErrNoRows.
func (obj *pgxcockroachImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field) (
	row *Value_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")

	var __values []interface{}
	__values = append(__values, accounting_timestamps_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Value_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.Value)
	if err == sql.ErrNoRows {
		// "Find" semantics: absence is not an error.
		return (*Value_Row)(nil), nil
	}
	if err != nil {
		return (*Value_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_Node_By_Id fetches the single nodes row keyed by id, scanning the full
// column set (identity, versioning, reputation, audit, and graceful-exit
// fields). A missing row surfaces as a wrapped sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	// Scan targets are positional: they must stay in lockstep with the SELECT list above.
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
	if err != nil {
		return (*Node)(nil), obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// All_Node_Id returns the id column for every row of nodes, with no
// predicate or ordering. The outer for-loop re-runs the query when
// obj.shouldRetry reports a retryable error; the inner closure ensures each
// attempt's rows are closed before the next attempt.
func (obj *pgxcockroachImpl) All_Node_Id(ctx context.Context) (
	rows []*Id_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes")

	var __values []interface{}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*Id_Row, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				row := &Id_Row{}
				err = __rows.Scan(&row.Id)
				if err != nil {
					return nil, err
				}
				rows = append(rows, row)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// Paged_Node returns one page of nodes rows ordered by id, plus a
// continuation token for the next call.
//
// Two statements are prepared: __embed_first_stmt (no WHERE) is used when
// start is nil or unset, i.e. the first page; __embed_stmt adds
// "(nodes.id) > ?" to resume after the previous page. The SELECT list
// repeats nodes.id at the end solely to feed the continuation value, so the
// scan has one more target than the Node struct has fields. next is only
// non-nil if at least one row was scanned.
//
// The outer for-loop retries the whole query when obj.shouldRetry says the
// error is retryable; the inner closure scopes each attempt's rows/defer.
func (obj *pgxcockroachImpl) Paged_Node(ctx context.Context,
	limit int, start *Paged_Node_Continuation) (
	rows []*Node, next *Paged_Node_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success, nodes.id FROM nodes WHERE (nodes.id) > ? ORDER BY nodes.id LIMIT ?")

	var __embed_first_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success, nodes.id FROM nodes ORDER BY nodes.id LIMIT ?")

	var __values []interface{}

	var __stmt string
	if start != nil && start._set {
		// Resuming: seek past the id recorded in the continuation.
		__values = append(__values, start._value_id, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		// First page: no seek predicate.
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	for {
		rows, next, err = func() (rows []*Node, next *Paged_Node_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			var __continuation Paged_Node_Continuation
			__continuation._set = true

			for __rows.Next() {
				node := &Node{}
				// Last scan target captures the trailing nodes.id into the continuation.
				err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess, &__continuation._value_id)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, node)
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}

}
|
|
|
|
// All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number returns (id,
// piece_count) pairs for every nodes row whose piece_count is not 0 (the
// literal 0 is baked into the generated SQL). Retries the whole query via
// the outer loop when obj.shouldRetry reports a retryable error.
func (obj *pgxcockroachImpl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
	rows []*Id_PieceCount_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.piece_count FROM nodes WHERE nodes.piece_count != 0")

	var __values []interface{}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*Id_PieceCount_Row, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				row := &Id_PieceCount_Row{}
				err = __rows.Scan(&row.Id, &row.PieceCount)
				if err != nil {
					return nil, err
				}
				rows = append(rows, row)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// Get_AuditHistory_By_NodeId fetches the single audit_histories row keyed by
// node_id. A missing row surfaces as a wrapped sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_AuditHistory_By_NodeId(ctx context.Context,
	audit_history_node_id AuditHistory_NodeId_Field) (
	audit_history *AuditHistory, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT audit_histories.node_id, audit_histories.history FROM audit_histories WHERE audit_histories.node_id = ?")

	var __values []interface{}
	__values = append(__values, audit_history_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	audit_history = &AuditHistory{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&audit_history.NodeId, &audit_history.History)
	if err != nil {
		return (*AuditHistory)(nil), obj.makeErr(err)
	}
	return audit_history, nil

}
|
|
|
|
// Get_User_By_NormalizedEmail_And_Status_Not_Number fetches the single users
// row matching normalized_email whose status is not 0.
//
// Because (normalized_email, status != 0) is not a unique key in the schema,
// the generator emits a "LIMIT 2" query and manual row handling instead of a
// QueryRow: zero rows maps to sql.ErrNoRows, and a second row maps to
// errTooManyRows, reported via tooManyRows(...) so callers learn which query
// violated the expected uniqueness. Retries via obj.shouldRetry like the
// other multi-row queries.
func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
	user_normalized_email User_NormalizedEmail_Field) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")

	var __values []interface{}
	__values = append(__values, user_normalized_email.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		user, err = func() (user *User, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			if !__rows.Next() {
				if err := __rows.Err(); err != nil {
					return nil, err
				}
				// No rows at all: report as the standard not-found error.
				return nil, sql.ErrNoRows
			}

			user = &User{}
			err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
			if err != nil {
				return nil, err
			}

			if __rows.Next() {
				// A second row means the "single row" expectation is violated.
				return nil, errTooManyRows
			}

			if err := __rows.Err(); err != nil {
				return nil, err
			}

			return user, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			if err == errTooManyRows {
				return nil, tooManyRows("User_By_NormalizedEmail_And_Status_Not_Number")
			}
			return nil, obj.makeErr(err)
		}
		return user, nil
	}

}
|
|
|
|
// Get_User_By_Id fetches the single users row keyed by id. A missing row
// surfaces as a wrapped sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	user *User, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
	if err != nil {
		return (*User)(nil), obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// Get_User_ProjectLimit_By_Id returns only the project_limit column of the
// users row keyed by id. A missing row surfaces as a wrapped sql.ErrNoRows
// from makeErr.
func (obj *pgxcockroachImpl) Get_User_ProjectLimit_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	row *ProjectLimit_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT users.project_limit FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &ProjectLimit_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ProjectLimit)
	if err != nil {
		return (*ProjectLimit_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_Project_By_Id fetches the single projects row keyed by id. A missing
// row surfaces as a wrapped sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	project *Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
	if err != nil {
		return (*Project)(nil), obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// Get_Project_UsageLimit_By_Id returns only the usage_limit column of the
// projects row keyed by id. A missing row surfaces as a wrapped
// sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_Project_UsageLimit_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	row *UsageLimit_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.usage_limit FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &UsageLimit_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.UsageLimit)
	if err != nil {
		return (*UsageLimit_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_Project_BandwidthLimit_By_Id returns only the bandwidth_limit column
// of the projects row keyed by id. A missing row surfaces as a wrapped
// sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	row *BandwidthLimit_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &BandwidthLimit_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit)
	if err != nil {
		return (*BandwidthLimit_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_Project_MaxBuckets_By_Id returns only the max_buckets column of the
// projects row keyed by id. A missing row surfaces as a wrapped
// sql.ErrNoRows from makeErr.
func (obj *pgxcockroachImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	row *MaxBuckets_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.max_buckets FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &MaxBuckets_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.MaxBuckets)
	if err != nil {
		return (*MaxBuckets_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_Project_BandwidthLimit_Project_UsageLimit_By_Id returns the
// bandwidth_limit and usage_limit columns of the projects row keyed by id in
// a single query. A missing row surfaces as a wrapped sql.ErrNoRows from
// makeErr.
func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	row *BandwidthLimit_UsageLimit_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit, projects.usage_limit FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &BandwidthLimit_UsageLimit_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit, &row.UsageLimit)
	if err != nil {
		return (*BandwidthLimit_UsageLimit_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// All_Project returns every row of projects, unordered. The outer for-loop
// re-runs the query when obj.shouldRetry reports a retryable error; the
// inner closure ensures each attempt's rows are closed before the next
// attempt.
func (obj *pgxcockroachImpl) All_Project(ctx context.Context) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects")

	var __values []interface{}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*Project, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				project := &Project{}
				err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
				if err != nil {
					return nil, err
				}
				rows = append(rows, project)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt returns every projects
// row created strictly before the given timestamp, ordered ascending by
// created_at. The outer for-loop re-runs the query when obj.shouldRetry
// reports a retryable error; the inner closure ensures each attempt's rows
// are closed before the next attempt.
func (obj *pgxcockroachImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_created_at_less Project_CreatedAt_Field) (
	rows []*Project, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at")

	var __values []interface{}
	__values = append(__values, project_created_at_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*Project, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				project := &Project{}
				err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
				if err != nil {
					return nil, err
				}
				rows = append(rows, project)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
func (obj *pgxcockroachImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_owner_id Project_OwnerId_Field) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_owner_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.created_at < ? ORDER BY projects.created_at LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_created_at_less.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Project, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*ProjectMember, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project_member := &ProjectMember{}
|
|
err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, project_member)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return (*ApiKey)(nil), obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_ApiKey_By_Head(ctx context.Context,
|
|
api_key_head ApiKey_Head_Field) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.head = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_head.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return (*ApiKey)(nil), obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
api_key *ApiKey, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.partner_id, api_keys.created_at FROM api_keys WHERE api_keys.name = ? AND api_keys.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_name.value(), api_key_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.PartnerId, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return (*ApiKey)(nil), obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? AND (bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.interval_seconds, bucket_bandwidth_rollups.action, bucket_bandwidth_rollups.inline, bucket_bandwidth_rollups.allocated, bucket_bandwidth_rollups.settled, bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action FROM bucket_bandwidth_rollups WHERE bucket_bandwidth_rollups.interval_start >= ? ORDER BY bucket_bandwidth_rollups.bucket_name, bucket_bandwidth_rollups.project_id, bucket_bandwidth_rollups.interval_start, bucket_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_bandwidth_rollup_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
bucket_bandwidth_rollup := &BucketBandwidthRollup{}
|
|
err = __rows.Scan(&bucket_bandwidth_rollup.BucketName, &bucket_bandwidth_rollup.ProjectId, &bucket_bandwidth_rollup.IntervalStart, &bucket_bandwidth_rollup.IntervalSeconds, &bucket_bandwidth_rollup.Action, &bucket_bandwidth_rollup.Inline, &bucket_bandwidth_rollup.Allocated, &bucket_bandwidth_rollup.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, bucket_bandwidth_rollup)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
|
|
limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? AND (bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action) > (?, ?, ?, ?) ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.interval_seconds, bucket_bandwidth_rollup_archives.action, bucket_bandwidth_rollup_archives.inline, bucket_bandwidth_rollup_archives.allocated, bucket_bandwidth_rollup_archives.settled, bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action FROM bucket_bandwidth_rollup_archives WHERE bucket_bandwidth_rollup_archives.interval_start >= ? ORDER BY bucket_bandwidth_rollup_archives.bucket_name, bucket_bandwidth_rollup_archives.project_id, bucket_bandwidth_rollup_archives.interval_start, bucket_bandwidth_rollup_archives.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_bucket_name, start._value_project_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
bucket_bandwidth_rollup_archive := &BucketBandwidthRollupArchive{}
|
|
err = __rows.Scan(&bucket_bandwidth_rollup_archive.BucketName, &bucket_bandwidth_rollup_archive.ProjectId, &bucket_bandwidth_rollup_archive.IntervalStart, &bucket_bandwidth_rollup_archive.IntervalSeconds, &bucket_bandwidth_rollup_archive.Action, &bucket_bandwidth_rollup_archive.Inline, &bucket_bandwidth_rollup_archive.Allocated, &bucket_bandwidth_rollup_archive.Settled, &__continuation._value_bucket_name, &__continuation._value_project_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, bucket_bandwidth_rollup_archive)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies")
|
|
|
|
var __values []interface{}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketStorageTally, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_storage_tally := &BucketStorageTally{}
|
|
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
|
rows []*BucketStorageTally, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketStorageTally, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_storage_tally := &BucketStorageTally{}
|
|
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_storage_tally)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
|
|
rows []*StoragenodeBandwidthRollup, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodeBandwidthRollup, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ? AND (storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.interval_start >= ? ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start >= ? AND (storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.interval_seconds, storagenode_bandwidth_rollups.action, storagenode_bandwidth_rollups.allocated, storagenode_bandwidth_rollups.settled, storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action FROM storagenode_bandwidth_rollups WHERE storagenode_bandwidth_rollups.storagenode_id = ? AND storagenode_bandwidth_rollups.interval_start >= ? ORDER BY storagenode_bandwidth_rollups.storagenode_id, storagenode_bandwidth_rollups.interval_start, storagenode_bandwidth_rollups.action LIMIT ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_bandwidth_rollup_storagenode_id.value(), storagenode_bandwidth_rollup_interval_start_greater_or_equal.value())
|
|
|
|
var __stmt string
|
|
if start != nil && start._set {
|
|
__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
} else {
|
|
__values = append(__values, limit)
|
|
__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
|
|
}
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, next, err = func() (rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
var __continuation Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
|
|
__continuation._set = true
|
|
|
|
for __rows.Next() {
|
|
storagenode_bandwidth_rollup := &StoragenodeBandwidthRollup{}
|
|
err = __rows.Scan(&storagenode_bandwidth_rollup.StoragenodeId, &storagenode_bandwidth_rollup.IntervalStart, &storagenode_bandwidth_rollup.IntervalSeconds, &storagenode_bandwidth_rollup.Action, &storagenode_bandwidth_rollup.Allocated, &storagenode_bandwidth_rollup.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
|
|
if err != nil {
|
|
return nil, nil, err
|
|
}
|
|
rows = append(rows, storagenode_bandwidth_rollup)
|
|
next = &__continuation
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, nil, err
|
|
}
|
|
|
|
return rows, next, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, nil, obj.makeErr(err)
|
|
}
|
|
return rows, next, nil
|
|
}
|
|
|
|
}
|
|
|
|
// Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual fetches one page
// (up to limit rows) of storagenode_bandwidth_rollup_archives whose interval_start is >= the
// given value, ordered by the key (storagenode_id, interval_start, action). The returned
// continuation can be passed back as start to fetch the following page; next is nil when no
// rows were returned.
func (obj *pgxcockroachImpl) Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal StoragenodeBandwidthRollupArchive_IntervalStart_Field,
	limit int, start *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*StoragenodeBandwidthRollupArchive, next *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	// Statement for subsequent pages: the key columns are selected a second time (to fill
	// the continuation token), and a tuple comparison skips all rows up to and including
	// the last row of the previous page.
	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.interval_seconds, storagenode_bandwidth_rollup_archives.action, storagenode_bandwidth_rollup_archives.allocated, storagenode_bandwidth_rollup_archives.settled, storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action FROM storagenode_bandwidth_rollup_archives WHERE storagenode_bandwidth_rollup_archives.interval_start >= ? AND (storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action LIMIT ?")

	// Statement for the first page: same query without the continuation filter.
	var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.interval_seconds, storagenode_bandwidth_rollup_archives.action, storagenode_bandwidth_rollup_archives.allocated, storagenode_bandwidth_rollup_archives.settled, storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action FROM storagenode_bandwidth_rollup_archives WHERE storagenode_bandwidth_rollup_archives.interval_start >= ? ORDER BY storagenode_bandwidth_rollup_archives.storagenode_id, storagenode_bandwidth_rollup_archives.interval_start, storagenode_bandwidth_rollup_archives.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal.value())

	// Pick the statement depending on whether a usable continuation was supplied.
	var __stmt string
	if start != nil && start._set {
		// Resume strictly after the previous page's last row.
		__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	// Re-run the whole query while the driver reports a retryable error.
	for {
		rows, next, err = func() (rows []*StoragenodeBandwidthRollupArchive, next *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			var __continuation Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				storagenode_bandwidth_rollup_archive := &StoragenodeBandwidthRollupArchive{}
				// The trailing scan targets copy the current row's key into the
				// continuation, so it always describes the last row seen.
				err = __rows.Scan(&storagenode_bandwidth_rollup_archive.StoragenodeId, &storagenode_bandwidth_rollup_archive.IntervalStart, &storagenode_bandwidth_rollup_archive.IntervalSeconds, &storagenode_bandwidth_rollup_archive.Action, &storagenode_bandwidth_rollup_archive.Allocated, &storagenode_bandwidth_rollup_archive.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, storagenode_bandwidth_rollup_archive)
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}

}
|
|
|
|
// Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual
// fetches one page (up to limit rows) of storagenode_bandwidth_rollups_phase2 for the given
// storagenode whose interval_start is >= the given value, ordered by the key
// (storagenode_id, interval_start, action). The returned continuation can be passed back as
// start to fetch the following page; next is nil when no rows were returned.
func (obj *pgxcockroachImpl) Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_phase2_storagenode_id StoragenodeBandwidthRollupPhase2_StoragenodeId_Field,
	storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal StoragenodeBandwidthRollupPhase2_IntervalStart_Field,
	limit int, start *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*StoragenodeBandwidthRollupPhase2, next *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
	defer mon.Task()(&ctx)(&err)

	// Statement for subsequent pages: the key columns are selected a second time (to fill
	// the continuation token), and a tuple comparison skips all rows up to and including
	// the last row of the previous page.
	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.interval_seconds, storagenode_bandwidth_rollups_phase2.action, storagenode_bandwidth_rollups_phase2.allocated, storagenode_bandwidth_rollups_phase2.settled, storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action FROM storagenode_bandwidth_rollups_phase2 WHERE storagenode_bandwidth_rollups_phase2.storagenode_id = ? AND storagenode_bandwidth_rollups_phase2.interval_start >= ? AND (storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action) > (?, ?, ?) ORDER BY storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action LIMIT ?")

	// Statement for the first page: same query without the continuation filter.
	var __embed_first_stmt = __sqlbundle_Literal("SELECT storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.interval_seconds, storagenode_bandwidth_rollups_phase2.action, storagenode_bandwidth_rollups_phase2.allocated, storagenode_bandwidth_rollups_phase2.settled, storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action FROM storagenode_bandwidth_rollups_phase2 WHERE storagenode_bandwidth_rollups_phase2.storagenode_id = ? AND storagenode_bandwidth_rollups_phase2.interval_start >= ? ORDER BY storagenode_bandwidth_rollups_phase2.storagenode_id, storagenode_bandwidth_rollups_phase2.interval_start, storagenode_bandwidth_rollups_phase2.action LIMIT ?")

	var __values []interface{}
	__values = append(__values, storagenode_bandwidth_rollup_phase2_storagenode_id.value(), storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal.value())

	// Pick the statement depending on whether a usable continuation was supplied.
	var __stmt string
	if start != nil && start._set {
		// Resume strictly after the previous page's last row.
		__values = append(__values, start._value_storagenode_id, start._value_interval_start, start._value_action, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	} else {
		__values = append(__values, limit)
		__stmt = __sqlbundle_Render(obj.dialect, __embed_first_stmt)
	}
	obj.logStmt(__stmt, __values...)

	// Re-run the whole query while the driver reports a retryable error.
	for {
		rows, next, err = func() (rows []*StoragenodeBandwidthRollupPhase2, next *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, nil, err
			}
			defer __rows.Close()

			var __continuation Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation
			__continuation._set = true

			for __rows.Next() {
				storagenode_bandwidth_rollup_phase2 := &StoragenodeBandwidthRollupPhase2{}
				// The trailing scan targets copy the current row's key into the
				// continuation, so it always describes the last row seen.
				err = __rows.Scan(&storagenode_bandwidth_rollup_phase2.StoragenodeId, &storagenode_bandwidth_rollup_phase2.IntervalStart, &storagenode_bandwidth_rollup_phase2.IntervalSeconds, &storagenode_bandwidth_rollup_phase2.Action, &storagenode_bandwidth_rollup_phase2.Allocated, &storagenode_bandwidth_rollup_phase2.Settled, &__continuation._value_storagenode_id, &__continuation._value_interval_start, &__continuation._value_action)
				if err != nil {
					return nil, nil, err
				}
				rows = append(rows, storagenode_bandwidth_rollup_phase2)
				next = &__continuation
			}

			if err := __rows.Err(); err != nil {
				return nil, nil, err
			}

			return rows, next, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, nil, obj.makeErr(err)
		}
		return rows, next, nil
	}

}
|
|
|
|
// All_StoragenodeStorageTally returns every row of storagenode_storage_tallies,
// retrying the query while the driver reports a retryable error.
func (obj *pgxcockroachImpl) All_StoragenodeStorageTally(ctx context.Context) (
	rows []*StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies")

	// No WHERE clause, so no bind values.
	var __values []interface{}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*StoragenodeStorageTally, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				storagenode_storage_tally := &StoragenodeStorageTally{}
				err = __rows.Scan(&storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
				if err != nil {
					return nil, err
				}
				rows = append(rows, storagenode_storage_tally)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual returns every
// storagenode_storage_tallies row whose interval_end_time is >= the given value,
// retrying the query while the driver reports a retryable error.
func (obj *pgxcockroachImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
	storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
	rows []*StoragenodeStorageTally, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_storage_tallies.node_id, storagenode_storage_tallies.interval_end_time, storagenode_storage_tallies.data_total FROM storagenode_storage_tallies WHERE storagenode_storage_tallies.interval_end_time >= ?")

	var __values []interface{}
	__values = append(__values, storagenode_storage_tally_interval_end_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*StoragenodeStorageTally, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				storagenode_storage_tally := &StoragenodeStorageTally{}
				err = __rows.Scan(&storagenode_storage_tally.NodeId, &storagenode_storage_tally.IntervalEndTime, &storagenode_storage_tally.DataTotal)
				if err != nil {
					return nil, err
				}
				rows = append(rows, storagenode_storage_tally)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// Get_StoragenodePaystub_By_NodeId_And_Period returns the single storagenode_paystubs row
// matching the given node id and period; scan errors (including no matching row) are
// wrapped via obj.makeErr.
func (obj *pgxcockroachImpl) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
	storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
	storagenode_paystub_period StoragenodePaystub_Period_Field) (
	storagenode_paystub *StoragenodePaystub, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid, storagenode_paystubs.distributed FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ? AND storagenode_paystubs.period = ?")

	var __values []interface{}
	__values = append(__values, storagenode_paystub_node_id.value(), storagenode_paystub_period.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	storagenode_paystub = &StoragenodePaystub{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid, &storagenode_paystub.Distributed)
	if err != nil {
		return (*StoragenodePaystub)(nil), obj.makeErr(err)
	}
	return storagenode_paystub, nil

}
|
|
|
|
// All_StoragenodePaystub_By_NodeId returns every storagenode_paystubs row for the given
// node id, retrying the query while the driver reports a retryable error.
func (obj *pgxcockroachImpl) All_StoragenodePaystub_By_NodeId(ctx context.Context,
	storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
	rows []*StoragenodePaystub, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid, storagenode_paystubs.distributed FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ?")

	var __values []interface{}
	__values = append(__values, storagenode_paystub_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*StoragenodePaystub, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				storagenode_paystub := &StoragenodePaystub{}
				err = __rows.Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid, &storagenode_paystub.Distributed)
				if err != nil {
					return nil, err
				}
				rows = append(rows, storagenode_paystub)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*StoragenodePayment, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ? ORDER BY storagenode_payments.id DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StoragenodePayment, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
storagenode_payment := &StoragenodePayment{}
|
|
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, storagenode_payment)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
// All_StoragenodePayment_By_NodeId returns every storagenode_payments row for the given
// node id, retrying the query while the driver reports a retryable error.
func (obj *pgxcockroachImpl) All_StoragenodePayment_By_NodeId(ctx context.Context,
	storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
	rows []*StoragenodePayment, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ?")

	var __values []interface{}
	__values = append(__values, storagenode_payment_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*StoragenodePayment, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				storagenode_payment := &StoragenodePayment{}
				err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
				if err != nil {
					return nil, err
				}
				rows = append(rows, storagenode_payment)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// All_StoragenodePayment_By_NodeId_And_Period returns every storagenode_payments row for
// the given node id and period, retrying the query while the driver reports a retryable
// error.
func (obj *pgxcockroachImpl) All_StoragenodePayment_By_NodeId_And_Period(ctx context.Context,
	storagenode_payment_node_id StoragenodePayment_NodeId_Field,
	storagenode_payment_period StoragenodePayment_Period_Field) (
	rows []*StoragenodePayment, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ?")

	var __values []interface{}
	__values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	for {
		rows, err = func() (rows []*StoragenodePayment, err error) {
			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
			if err != nil {
				return nil, err
			}
			defer __rows.Close()

			for __rows.Next() {
				storagenode_payment := &StoragenodePayment{}
				err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
				if err != nil {
					return nil, err
				}
				rows = append(rows, storagenode_payment)
			}
			if err := __rows.Err(); err != nil {
				return nil, err
			}
			return rows, nil
		}()
		if err != nil {
			if obj.shouldRetry(err) {
				continue
			}
			return nil, obj.makeErr(err)
		}
		return rows, nil
	}

}
|
|
|
|
// Get_PeerIdentity_By_NodeId returns the single peer_identities row matching the given
// node id; scan errors (including no matching row) are wrapped via obj.makeErr.
func (obj *pgxcockroachImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	peer_identity *PeerIdentity, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.node_id, peer_identities.leaf_serial_number, peer_identities.chain, peer_identities.updated_at FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	peer_identity = &PeerIdentity{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&peer_identity.NodeId, &peer_identity.LeafSerialNumber, &peer_identity.Chain, &peer_identity.UpdatedAt)
	if err != nil {
		return (*PeerIdentity)(nil), obj.makeErr(err)
	}
	return peer_identity, nil

}
|
|
|
|
// Get_PeerIdentity_LeafSerialNumber_By_NodeId returns only the leaf_serial_number column
// of the peer_identities row matching the given node id, wrapped in a LeafSerialNumber_Row.
func (obj *pgxcockroachImpl) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field) (
	row *LeafSerialNumber_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT peer_identities.leaf_serial_number FROM peer_identities WHERE peer_identities.node_id = ?")

	var __values []interface{}
	__values = append(__values, peer_identity_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &LeafSerialNumber_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.LeafSerialNumber)
	if err != nil {
		return (*LeafSerialNumber_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_RegistrationToken_By_Secret returns the single registration_tokens row matching the
// given secret; scan errors (including no matching row) are wrapped via obj.makeErr.
func (obj *pgxcockroachImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, registration_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return (*RegistrationToken)(nil), obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Get_RegistrationToken_By_OwnerId returns the single registration_tokens row matching the
// given (nullable) owner id. The owner_id condition is built dynamically: when the field is
// null, no bind value is appended and the condition is rendered with Null set (presumably
// as an IS NULL check by the sqlbundle renderer — rendering happens elsewhere).
func (obj *pgxcockroachImpl) Get_RegistrationToken_By_OwnerId(ctx context.Context,
	registration_token_owner_id RegistrationToken_OwnerId_Field) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)

	// Condition placeholder for the nullable owner_id column; Null is toggled below.
	var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}}

	var __values []interface{}
	if !registration_token_owner_id.isnull() {
		// Non-null owner id: compare against a bound value instead of NULL.
		__cond_0.Null = false
		__values = append(__values, registration_token_owner_id.value())
	}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return (*RegistrationToken)(nil), obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Get_ResetPasswordToken_By_Secret returns the single reset_password_tokens row matching
// the given secret; scan errors (including no matching row) are wrapped via obj.makeErr.
func (obj *pgxcockroachImpl) Get_ResetPasswordToken_By_Secret(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return (*ResetPasswordToken)(nil), obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
// Get_ResetPasswordToken_By_OwnerId returns the single reset_password_tokens row matching
// the given owner id; scan errors (including no matching row) are wrapped via obj.makeErr.
func (obj *pgxcockroachImpl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
	reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
	reset_password_token *ResetPasswordToken, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT reset_password_tokens.secret, reset_password_tokens.owner_id, reset_password_tokens.created_at FROM reset_password_tokens WHERE reset_password_tokens.owner_id = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_owner_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	reset_password_token = &ResetPasswordToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&reset_password_token.Secret, &reset_password_token.OwnerId, &reset_password_token.CreatedAt)
	if err != nil {
		return (*ResetPasswordToken)(nil), obj.makeErr(err)
	}
	return reset_password_token, nil

}
|
|
|
|
// Get_BucketMetainfo_By_ProjectId_And_Name returns the single bucket_metainfos row matching
// the given project id and bucket name; scan errors (including no matching row) are wrapped
// via obj.makeErr.
func (obj *pgxcockroachImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	bucket_metainfo *BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_metainfo = &BucketMetainfo{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
	if err != nil {
		return (*BucketMetainfo)(nil), obj.makeErr(err)
	}
	return bucket_metainfo, nil

}
|
|
|
|
// Get_BucketMetainfo_Id_By_ProjectId_And_Name returns only the id column of the
// bucket_metainfos row matching the given project id and bucket name, wrapped in an Id_Row.
func (obj *pgxcockroachImpl) Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	row *Id_Row, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Id_Row{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.Id)
	if err != nil {
		return (*Id_Row)(nil), obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
func (obj *pgxcockroachImpl) Has_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
has bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT EXISTS( SELECT 1 FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&has)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
return has, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketMetainfo, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_metainfo := &BucketMetainfo{}
|
|
err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_metainfo)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*BucketMetainfo, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_metainfo := &BucketMetainfo{}
|
|
err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, bucket_metainfo)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
|
|
count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT COUNT(*) FROM bucket_metainfos WHERE bucket_metainfos.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_metainfo_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&count)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_progress.node_id, graceful_exit_progress.bytes_transferred, graceful_exit_progress.pieces_transferred, graceful_exit_progress.pieces_failed, graceful_exit_progress.updated_at FROM graceful_exit_progress WHERE graceful_exit_progress.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_progress_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
graceful_exit_progress = &GracefulExitProgress{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&graceful_exit_progress.NodeId, &graceful_exit_progress.BytesTransferred, &graceful_exit_progress.PiecesTransferred, &graceful_exit_progress.PiecesFailed, &graceful_exit_progress.UpdatedAt)
|
|
if err != nil {
|
|
return (*GracefulExitProgress)(nil), obj.makeErr(err)
|
|
}
|
|
return graceful_exit_progress, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT graceful_exit_transfer_queue.node_id, graceful_exit_transfer_queue.path, graceful_exit_transfer_queue.piece_num, graceful_exit_transfer_queue.root_piece_id, graceful_exit_transfer_queue.durability_ratio, graceful_exit_transfer_queue.queued_at, graceful_exit_transfer_queue.requested_at, graceful_exit_transfer_queue.last_failed_at, graceful_exit_transfer_queue.last_failed_code, graceful_exit_transfer_queue.failed_count, graceful_exit_transfer_queue.finished_at, graceful_exit_transfer_queue.order_limit_send_count FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
graceful_exit_transfer_queue = &GracefulExitTransferQueue{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&graceful_exit_transfer_queue.NodeId, &graceful_exit_transfer_queue.Path, &graceful_exit_transfer_queue.PieceNum, &graceful_exit_transfer_queue.RootPieceId, &graceful_exit_transfer_queue.DurabilityRatio, &graceful_exit_transfer_queue.QueuedAt, &graceful_exit_transfer_queue.RequestedAt, &graceful_exit_transfer_queue.LastFailedAt, &graceful_exit_transfer_queue.LastFailedCode, &graceful_exit_transfer_queue.FailedCount, &graceful_exit_transfer_queue.FinishedAt, &graceful_exit_transfer_queue.OrderLimitSendCount)
|
|
if err != nil {
|
|
return (*GracefulExitTransferQueue)(nil), obj.makeErr(err)
|
|
}
|
|
return graceful_exit_transfer_queue, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.customer_id FROM stripe_customers WHERE stripe_customers.user_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripe_customer_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &CustomerId_Row{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.CustomerId)
|
|
if err != nil {
|
|
return (*CustomerId_Row)(nil), obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripeCustomer, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripe_customers.user_id, stripe_customers.customer_id, stripe_customers.created_at FROM stripe_customers WHERE stripe_customers.created_at <= ? ORDER BY stripe_customers.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripe_customer_created_at_less_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StripeCustomer, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
stripe_customer := &StripeCustomer{}
|
|
err = __rows.Scan(&stripe_customer.UserId, &stripe_customer.CustomerId, &stripe_customer.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, stripe_customer)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at FROM coinpayments_transactions WHERE coinpayments_transactions.user_id = ? ORDER BY coinpayments_transactions.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coinpayments_transaction_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*CoinpaymentsTransaction, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coinpayments_transaction := &CoinpaymentsTransaction{}
|
|
err = __rows.Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coinpayments_transaction)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.project_id = ? AND stripecoinpayments_invoice_project_records.period_start = ? AND stripecoinpayments_invoice_project_records.period_end = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_invoice_project_record_project_id.value(), stripecoinpayments_invoice_project_record_period_start.value(), stripecoinpayments_invoice_project_record_period_end.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return (*StripecoinpaymentsInvoiceProjectRecord)(nil), obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_invoice_project_record, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_StripecoinpaymentsInvoiceProjectRecord_By_PeriodStart_And_PeriodEnd_And_State(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at FROM stripecoinpayments_invoice_project_records WHERE stripecoinpayments_invoice_project_records.period_start = ? AND stripecoinpayments_invoice_project_records.period_end = ? AND stripecoinpayments_invoice_project_records.state = ? LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_invoice_project_record_period_start.value(), stripecoinpayments_invoice_project_record_period_end.value(), stripecoinpayments_invoice_project_record_state.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
stripecoinpayments_invoice_project_record := &StripecoinpaymentsInvoiceProjectRecord{}
|
|
err = __rows.Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, stripecoinpayments_invoice_project_record)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT stripecoinpayments_tx_conversion_rates.tx_id, stripecoinpayments_tx_conversion_rates.rate, stripecoinpayments_tx_conversion_rates.created_at FROM stripecoinpayments_tx_conversion_rates WHERE stripecoinpayments_tx_conversion_rates.tx_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, stripecoinpayments_tx_conversion_rate_tx_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
stripecoinpayments_tx_conversion_rate = &StripecoinpaymentsTxConversionRate{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_tx_conversion_rate.TxId, &stripecoinpayments_tx_conversion_rate.Rate, &stripecoinpayments_tx_conversion_rate.CreatedAt)
|
|
if err != nil {
|
|
return (*StripecoinpaymentsTxConversionRate)(nil), obj.makeErr(err)
|
|
}
|
|
return stripecoinpayments_tx_conversion_rate, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
coupon_code *CouponCode, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupon_codes.id, coupon_codes.name, coupon_codes.amount, coupon_codes.description, coupon_codes.type, coupon_codes.billing_periods, coupon_codes.created_at FROM coupon_codes WHERE coupon_codes.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_code_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon_code = &CouponCode{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_code.Id, &coupon_code.Name, &coupon_code.Amount, &coupon_code.Description, &coupon_code.Type, &coupon_code.BillingPeriods, &coupon_code.CreatedAt)
|
|
if err != nil {
|
|
return (*CouponCode)(nil), obj.makeErr(err)
|
|
}
|
|
return coupon_code, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
coupon = &Coupon{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return (*Coupon)(nil), obj.makeErr(err)
|
|
}
|
|
return coupon, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.user_id = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.user_id = ? AND coupons.status = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_user_id.value(), coupon_status.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.status = ? ORDER BY coupons.created_at DESC")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_status.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*Coupon, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at FROM coupons WHERE coupons.created_at <= ? AND coupons.status = ? ORDER BY coupons.created_at DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_created_at_less_or_equal.value(), coupon_status.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*Coupon, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon := &Coupon{}
|
|
err = __rows.Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Limited_CouponUsage_By_Period_And_Status_Equal_Number(ctx context.Context,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*CouponUsage, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period FROM coupon_usages WHERE coupon_usages.period = ? AND coupon_usages.status = 0 LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, coupon_usage_period.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
for {
|
|
rows, err = func() (rows []*CouponUsage, err error) {
|
|
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
coupon_usage := &CouponUsage{}
|
|
err = __rows.Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
rows = append(rows, coupon_usage)
|
|
}
|
|
err = __rows.Err()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return rows, nil
|
|
}()
|
|
if err != nil {
|
|
if obj.shouldRetry(err) {
|
|
continue
|
|
}
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
}
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Has_NodeApiVersion_By_Id_And_ApiVersion_GreaterOrEqual(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version_greater_or_equal NodeApiVersion_ApiVersion_Field) (
|
|
has bool, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT EXISTS( SELECT 1 FROM node_api_versions WHERE node_api_versions.id = ? AND node_api_versions.api_version >= ? )")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_api_version_id.value(), node_api_version_api_version_greater_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&has)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
return has, nil
|
|
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Segmentdetail._set {
|
|
__values = append(__values, update.Segmentdetail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
|
|
}
|
|
|
|
if update.PiecesLostCount._set {
|
|
__values = append(__values, update.PiecesLostCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
|
|
}
|
|
|
|
if update.SegDamagedUnixSec._set {
|
|
__values = append(__values, update.SegDamagedUnixSec.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
|
|
}
|
|
|
|
if update.RepairAttemptCount._set {
|
|
__values = append(__values, update.RepairAttemptCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, irreparabledb_segmentpath.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, accounting_timestamps_name.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Address._set {
|
|
__values = append(__values, update.Address.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
|
|
}
|
|
|
|
if update.LastNet._set {
|
|
__values = append(__values, update.LastNet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
|
|
}
|
|
|
|
if update.LastIpPort._set {
|
|
__values = append(__values, update.LastIpPort.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
|
|
}
|
|
|
|
if update.Protocol._set {
|
|
__values = append(__values, update.Protocol.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
|
|
}
|
|
|
|
if update.Type._set {
|
|
__values = append(__values, update.Type.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.Wallet._set {
|
|
__values = append(__values, update.Wallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
|
|
}
|
|
|
|
if update.WalletFeatures._set {
|
|
__values = append(__values, update.WalletFeatures.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
|
|
}
|
|
|
|
if update.FreeDisk._set {
|
|
__values = append(__values, update.FreeDisk.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
|
|
}
|
|
|
|
if update.PieceCount._set {
|
|
__values = append(__values, update.PieceCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
|
|
}
|
|
|
|
if update.Major._set {
|
|
__values = append(__values, update.Major.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
|
|
}
|
|
|
|
if update.Minor._set {
|
|
__values = append(__values, update.Minor.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
|
|
}
|
|
|
|
if update.Patch._set {
|
|
__values = append(__values, update.Patch.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
|
|
}
|
|
|
|
if update.Hash._set {
|
|
__values = append(__values, update.Hash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
|
|
}
|
|
|
|
if update.Timestamp._set {
|
|
__values = append(__values, update.Timestamp.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
|
|
}
|
|
|
|
if update.Release._set {
|
|
__values = append(__values, update.Release.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
|
|
}
|
|
|
|
if update.Latency90._set {
|
|
__values = append(__values, update.Latency90.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalAuditCount._set {
|
|
__values = append(__values, update.TotalAuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
|
|
}
|
|
|
|
if update.VettedAt._set {
|
|
__values = append(__values, update.VettedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("vetted_at = ?"))
|
|
}
|
|
|
|
if update.LastContactSuccess._set {
|
|
__values = append(__values, update.LastContactSuccess.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
|
|
}
|
|
|
|
if update.LastContactFailure._set {
|
|
__values = append(__values, update.LastContactFailure.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
|
|
}
|
|
|
|
if update.Contained._set {
|
|
__values = append(__values, update.Contained.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
|
|
}
|
|
|
|
if update.Disqualified._set {
|
|
__values = append(__values, update.Disqualified.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
|
|
}
|
|
|
|
if update.Suspended._set {
|
|
__values = append(__values, update.Suspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("suspended = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditSuspended._set {
|
|
__values = append(__values, update.UnknownAuditSuspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_suspended = ?"))
|
|
}
|
|
|
|
if update.OfflineSuspended._set {
|
|
__values = append(__values, update.OfflineSuspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("offline_suspended = ?"))
|
|
}
|
|
|
|
if update.UnderReview._set {
|
|
__values = append(__values, update.UnderReview.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("under_review = ?"))
|
|
}
|
|
|
|
if update.OnlineScore._set {
|
|
__values = append(__values, update.OnlineScore.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("online_score = ?"))
|
|
}
|
|
|
|
if update.AuditReputationAlpha._set {
|
|
__values = append(__values, update.AuditReputationAlpha.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
|
|
}
|
|
|
|
if update.AuditReputationBeta._set {
|
|
__values = append(__values, update.AuditReputationBeta.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditReputationAlpha._set {
|
|
__values = append(__values, update.UnknownAuditReputationAlpha.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_alpha = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditReputationBeta._set {
|
|
__values = append(__values, update.UnknownAuditReputationBeta.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_beta = ?"))
|
|
}
|
|
|
|
if update.ExitInitiatedAt._set {
|
|
__values = append(__values, update.ExitInitiatedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
|
|
}
|
|
|
|
if update.ExitLoopCompletedAt._set {
|
|
__values = append(__values, update.ExitLoopCompletedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
|
|
}
|
|
|
|
if update.ExitFinishedAt._set {
|
|
__values = append(__values, update.ExitFinishedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
|
|
}
|
|
|
|
if update.ExitSuccess._set {
|
|
__values = append(__values, update.ExitSuccess.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Address._set {
|
|
__values = append(__values, update.Address.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
|
|
}
|
|
|
|
if update.LastNet._set {
|
|
__values = append(__values, update.LastNet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
|
|
}
|
|
|
|
if update.LastIpPort._set {
|
|
__values = append(__values, update.LastIpPort.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
|
|
}
|
|
|
|
if update.Protocol._set {
|
|
__values = append(__values, update.Protocol.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
|
|
}
|
|
|
|
if update.Type._set {
|
|
__values = append(__values, update.Type.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("type = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.Wallet._set {
|
|
__values = append(__values, update.Wallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
|
|
}
|
|
|
|
if update.WalletFeatures._set {
|
|
__values = append(__values, update.WalletFeatures.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
|
|
}
|
|
|
|
if update.FreeDisk._set {
|
|
__values = append(__values, update.FreeDisk.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
|
|
}
|
|
|
|
if update.PieceCount._set {
|
|
__values = append(__values, update.PieceCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("piece_count = ?"))
|
|
}
|
|
|
|
if update.Major._set {
|
|
__values = append(__values, update.Major.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("major = ?"))
|
|
}
|
|
|
|
if update.Minor._set {
|
|
__values = append(__values, update.Minor.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("minor = ?"))
|
|
}
|
|
|
|
if update.Patch._set {
|
|
__values = append(__values, update.Patch.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("patch = ?"))
|
|
}
|
|
|
|
if update.Hash._set {
|
|
__values = append(__values, update.Hash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("hash = ?"))
|
|
}
|
|
|
|
if update.Timestamp._set {
|
|
__values = append(__values, update.Timestamp.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("timestamp = ?"))
|
|
}
|
|
|
|
if update.Release._set {
|
|
__values = append(__values, update.Release.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("release = ?"))
|
|
}
|
|
|
|
if update.Latency90._set {
|
|
__values = append(__values, update.Latency90.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalAuditCount._set {
|
|
__values = append(__values, update.TotalAuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
|
|
}
|
|
|
|
if update.VettedAt._set {
|
|
__values = append(__values, update.VettedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("vetted_at = ?"))
|
|
}
|
|
|
|
if update.LastContactSuccess._set {
|
|
__values = append(__values, update.LastContactSuccess.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_success = ?"))
|
|
}
|
|
|
|
if update.LastContactFailure._set {
|
|
__values = append(__values, update.LastContactFailure.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?"))
|
|
}
|
|
|
|
if update.Contained._set {
|
|
__values = append(__values, update.Contained.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?"))
|
|
}
|
|
|
|
if update.Disqualified._set {
|
|
__values = append(__values, update.Disqualified.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("disqualified = ?"))
|
|
}
|
|
|
|
if update.Suspended._set {
|
|
__values = append(__values, update.Suspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("suspended = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditSuspended._set {
|
|
__values = append(__values, update.UnknownAuditSuspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_suspended = ?"))
|
|
}
|
|
|
|
if update.OfflineSuspended._set {
|
|
__values = append(__values, update.OfflineSuspended.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("offline_suspended = ?"))
|
|
}
|
|
|
|
if update.UnderReview._set {
|
|
__values = append(__values, update.UnderReview.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("under_review = ?"))
|
|
}
|
|
|
|
if update.OnlineScore._set {
|
|
__values = append(__values, update.OnlineScore.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("online_score = ?"))
|
|
}
|
|
|
|
if update.AuditReputationAlpha._set {
|
|
__values = append(__values, update.AuditReputationAlpha.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_alpha = ?"))
|
|
}
|
|
|
|
if update.AuditReputationBeta._set {
|
|
__values = append(__values, update.AuditReputationBeta.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_reputation_beta = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditReputationAlpha._set {
|
|
__values = append(__values, update.UnknownAuditReputationAlpha.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_alpha = ?"))
|
|
}
|
|
|
|
if update.UnknownAuditReputationBeta._set {
|
|
__values = append(__values, update.UnknownAuditReputationBeta.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("unknown_audit_reputation_beta = ?"))
|
|
}
|
|
|
|
if update.ExitInitiatedAt._set {
|
|
__values = append(__values, update.ExitInitiatedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_initiated_at = ?"))
|
|
}
|
|
|
|
if update.ExitLoopCompletedAt._set {
|
|
__values = append(__values, update.ExitLoopCompletedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_loop_completed_at = ?"))
|
|
}
|
|
|
|
if update.ExitFinishedAt._set {
|
|
__values = append(__values, update.ExitFinishedAt.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_finished_at = ?"))
|
|
}
|
|
|
|
if update.ExitSuccess._set {
|
|
__values = append(__values, update.ExitSuccess.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("exit_success = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Update_AuditHistory_By_NodeId(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field,
|
|
update AuditHistory_Update_Fields) (
|
|
audit_history *AuditHistory, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE audit_histories SET "), __sets, __sqlbundle_Literal(" WHERE audit_histories.node_id = ? RETURNING audit_histories.node_id, audit_histories.history")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.History._set {
|
|
__values = append(__values, update.History.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("history = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, audit_history_node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
audit_history = &AuditHistory{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&audit_history.NodeId, &audit_history.History)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return audit_history, nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional, users.employee_count")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.NormalizedEmail._set {
|
|
__values = append(__values, update.NormalizedEmail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("normalized_email = ?"))
|
|
}
|
|
|
|
if update.FullName._set {
|
|
__values = append(__values, update.FullName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("full_name = ?"))
|
|
}
|
|
|
|
if update.ShortName._set {
|
|
__values = append(__values, update.ShortName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("short_name = ?"))
|
|
}
|
|
|
|
if update.PasswordHash._set {
|
|
__values = append(__values, update.PasswordHash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?"))
|
|
}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if update.ProjectLimit._set {
|
|
__values = append(__values, update.ProjectLimit.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_limit = ?"))
|
|
}
|
|
|
|
if update.Position._set {
|
|
__values = append(__values, update.Position.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("position = ?"))
|
|
}
|
|
|
|
if update.CompanyName._set {
|
|
__values = append(__values, update.CompanyName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_name = ?"))
|
|
}
|
|
|
|
if update.CompanySize._set {
|
|
__values = append(__values, update.CompanySize.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_size = ?"))
|
|
}
|
|
|
|
if update.WorkingOn._set {
|
|
__values = append(__values, update.WorkingOn.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("working_on = ?"))
|
|
}
|
|
|
|
if update.IsProfessional._set {
|
|
__values = append(__values, update.IsProfessional.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("is_professional = ?"))
|
|
}
|
|
|
|
if update.EmployeeCount._set {
|
|
__values = append(__values, update.EmployeeCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("employee_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, user_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
user = &User{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional, &user.EmployeeCount)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.usage_limit, projects.bandwidth_limit, projects.rate_limit, projects.max_buckets, projects.partner_id, projects.owner_id, projects.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Name._set {
|
|
__values = append(__values, update.Name.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
|
|
}
|
|
|
|
if update.Description._set {
|
|
__values = append(__values, update.Description.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
|
|
}
|
|
|
|
if update.UsageLimit._set {
|
|
__values = append(__values, update.UsageLimit.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("usage_limit = ?"))
|
|
}
|
|
|
|
if update.BandwidthLimit._set {
|
|
__values = append(__values, update.BandwidthLimit.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("bandwidth_limit = ?"))
|
|
}
|
|
|
|
if update.RateLimit._set {
|
|
__values = append(__values, update.RateLimit.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("rate_limit = ?"))
|
|
}
|
|
|
|
if update.MaxBuckets._set {
|
|
__values = append(__values, update.MaxBuckets.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("max_buckets = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, project_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.BandwidthLimit, &project.RateLimit, &project.MaxBuckets, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Name._set {
|
|
__values = append(__values, update.Name.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, api_key_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
|
|
if err != nil {
|
|
return obj.makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// UpdateNoReturn_PeerIdentity_By_NodeId updates the peer_identities row with
// the given node_id. updated_at is always refreshed from the db clock hook,
// so there is no empty-update check here — the SET clause is never empty.
func (obj *pgxcockroachImpl) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE peer_identities SET "), __sets, __sqlbundle_Literal(" WHERE peer_identities.node_id = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.LeafSerialNumber._set {
		__values = append(__values, update.LeafSerialNumber.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("leaf_serial_number = ?"))
	}

	if update.Chain._set {
		__values = append(__values, update.Chain.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("chain = ?"))
	}

	// Auto-maintained column: always bump updated_at via the Hooks clock
	// (overridable in tests), normalized to UTC.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, peer_identity_node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_RegistrationToken_By_Secret updates the registration_tokens row with
// the given secret and returns the updated row via RETURNING. Returns
// (nil, nil) when no row matched, and emptyUpdate() when update sets nothing.
func (obj *pgxcockroachImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	update RegistrationToken_Update_Fields) (
	registration_token *RegistrationToken, err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ? RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.OwnerId._set {
		__values = append(__values, update.OwnerId.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, registration_token_secret.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	// No matching row is not an error for this method; signal it with nil.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil
}
|
|
|
|
// Update_BucketMetainfo_By_ProjectId_And_Name updates the bucket_metainfos
// row identified by (project_id, name), setting only the fields marked as set
// in update, and returns the updated row via RETURNING. Returns (nil, nil)
// when no row matched, and emptyUpdate() when update sets nothing.
func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field,
	update BucketMetainfo_Update_Fields) (
	bucket_metainfo *BucketMetainfo, err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.partner_id, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	// One guarded append per optional field; order here fixes placeholder order.
	if update.PartnerId._set {
		__values = append(__values, update.PartnerId.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("partner_id = ?"))
	}

	if update.DefaultSegmentSize._set {
		__values = append(__values, update.DefaultSegmentSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_segment_size = ?"))
	}

	if update.DefaultEncryptionCipherSuite._set {
		__values = append(__values, update.DefaultEncryptionCipherSuite.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_cipher_suite = ?"))
	}

	if update.DefaultEncryptionBlockSize._set {
		__values = append(__values, update.DefaultEncryptionBlockSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_encryption_block_size = ?"))
	}

	if update.DefaultRedundancyAlgorithm._set {
		__values = append(__values, update.DefaultRedundancyAlgorithm.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_algorithm = ?"))
	}

	if update.DefaultRedundancyShareSize._set {
		__values = append(__values, update.DefaultRedundancyShareSize.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_share_size = ?"))
	}

	if update.DefaultRedundancyRequiredShares._set {
		__values = append(__values, update.DefaultRedundancyRequiredShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_required_shares = ?"))
	}

	if update.DefaultRedundancyRepairShares._set {
		__values = append(__values, update.DefaultRedundancyRepairShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_repair_shares = ?"))
	}

	if update.DefaultRedundancyOptimalShares._set {
		__values = append(__values, update.DefaultRedundancyOptimalShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_optimal_shares = ?"))
	}

	if update.DefaultRedundancyTotalShares._set {
		__values = append(__values, update.DefaultRedundancyTotalShares.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("default_redundancy_total_shares = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_metainfo = &BucketMetainfo{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.PartnerId, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares)
	// No matching row is not an error for this method; signal it with nil.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_metainfo, nil
}
|
|
|
|
// UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum
// updates the graceful_exit_transfer_queue row identified by
// (node_id, path, piece_num), setting only the fields marked as set in
// update. It does not read back the row. Returns emptyUpdate() if no fields
// were provided.
func (obj *pgxcockroachImpl) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
	update GracefulExitTransferQueue_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE graceful_exit_transfer_queue SET "), __sets, __sqlbundle_Literal(" WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.DurabilityRatio._set {
		__values = append(__values, update.DurabilityRatio.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("durability_ratio = ?"))
	}

	if update.RequestedAt._set {
		__values = append(__values, update.RequestedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("requested_at = ?"))
	}

	if update.LastFailedAt._set {
		__values = append(__values, update.LastFailedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_at = ?"))
	}

	if update.LastFailedCode._set {
		__values = append(__values, update.LastFailedCode.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_failed_code = ?"))
	}

	if update.FailedCount._set {
		__values = append(__values, update.FailedCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("failed_count = ?"))
	}

	if update.FinishedAt._set {
		__values = append(__values, update.FinishedAt.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("finished_at = ?"))
	}

	if update.OrderLimitSendCount._set {
		__values = append(__values, update.OrderLimitSendCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("order_limit_send_count = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return emptyUpdate()
	}

	__args = append(__args, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Update_CoinpaymentsTransaction_By_Id updates the coinpayments_transactions
// row with the given id and returns the updated row via RETURNING. Returns
// (nil, nil) when no row matched, and emptyUpdate() when update sets nothing.
func (obj *pgxcockroachImpl) Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
	coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
	update CoinpaymentsTransaction_Update_Fields) (
	coinpayments_transaction *CoinpaymentsTransaction, err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coinpayments_transactions SET "), __sets, __sqlbundle_Literal(" WHERE coinpayments_transactions.id = ? RETURNING coinpayments_transactions.id, coinpayments_transactions.user_id, coinpayments_transactions.address, coinpayments_transactions.amount, coinpayments_transactions.received, coinpayments_transactions.status, coinpayments_transactions.key, coinpayments_transactions.timeout, coinpayments_transactions.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.Received._set {
		__values = append(__values, update.Received.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("received = ?"))
	}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, coinpayments_transaction_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coinpayments_transaction = &CoinpaymentsTransaction{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coinpayments_transaction.Id, &coinpayments_transaction.UserId, &coinpayments_transaction.Address, &coinpayments_transaction.Amount, &coinpayments_transaction.Received, &coinpayments_transaction.Status, &coinpayments_transaction.Key, &coinpayments_transaction.Timeout, &coinpayments_transaction.CreatedAt)
	// No matching row is not an error for this method; signal it with nil.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coinpayments_transaction, nil
}
|
|
|
|
// Update_StripecoinpaymentsInvoiceProjectRecord_By_Id updates the
// stripecoinpayments_invoice_project_records row with the given id (only the
// state column is updatable) and returns the updated row via RETURNING.
// Returns (nil, nil) when no row matched, and emptyUpdate() when update sets
// nothing.
func (obj *pgxcockroachImpl) Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
	update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
	stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE stripecoinpayments_invoice_project_records SET "), __sets, __sqlbundle_Literal(" WHERE stripecoinpayments_invoice_project_records.id = ? RETURNING stripecoinpayments_invoice_project_records.id, stripecoinpayments_invoice_project_records.project_id, stripecoinpayments_invoice_project_records.storage, stripecoinpayments_invoice_project_records.egress, stripecoinpayments_invoice_project_records.objects, stripecoinpayments_invoice_project_records.period_start, stripecoinpayments_invoice_project_records.period_end, stripecoinpayments_invoice_project_records.state, stripecoinpayments_invoice_project_records.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.State._set {
		__values = append(__values, update.State.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("state = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, stripecoinpayments_invoice_project_record_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	stripecoinpayments_invoice_project_record = &StripecoinpaymentsInvoiceProjectRecord{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&stripecoinpayments_invoice_project_record.Id, &stripecoinpayments_invoice_project_record.ProjectId, &stripecoinpayments_invoice_project_record.Storage, &stripecoinpayments_invoice_project_record.Egress, &stripecoinpayments_invoice_project_record.Objects, &stripecoinpayments_invoice_project_record.PeriodStart, &stripecoinpayments_invoice_project_record.PeriodEnd, &stripecoinpayments_invoice_project_record.State, &stripecoinpayments_invoice_project_record.CreatedAt)
	// No matching row is not an error for this method; signal it with nil.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return stripecoinpayments_invoice_project_record, nil
}
|
|
|
|
// Update_Coupon_By_Id updates the coupons row with the given id (only the
// status column is updatable) and returns the updated row via RETURNING.
// Returns (nil, nil) when no row matched, and emptyUpdate() when update sets
// nothing.
func (obj *pgxcockroachImpl) Update_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field,
	update Coupon_Update_Fields) (
	coupon *Coupon, err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupons SET "), __sets, __sqlbundle_Literal(" WHERE coupons.id = ? RETURNING coupons.id, coupons.user_id, coupons.amount, coupons.description, coupons.type, coupons.status, coupons.duration, coupons.billing_periods, coupons.coupon_code_name, coupons.created_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, coupon_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon = &Coupon{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon.Id, &coupon.UserId, &coupon.Amount, &coupon.Description, &coupon.Type, &coupon.Status, &coupon.Duration, &coupon.BillingPeriods, &coupon.CouponCodeName, &coupon.CreatedAt)
	// No matching row is not an error for this method; signal it with nil.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon, nil
}
|
|
|
|
// Update_CouponUsage_By_CouponId_And_Period updates the coupon_usages row
// identified by (coupon_id, period) — only the status column is updatable —
// and returns the updated row via RETURNING. Returns (nil, nil) when no row
// matched, and emptyUpdate() when update sets nothing.
func (obj *pgxcockroachImpl) Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_period CouponUsage_Period_Field,
	update CouponUsage_Update_Fields) (
	coupon_usage *CouponUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE coupon_usages SET "), __sets, __sqlbundle_Literal(" WHERE coupon_usages.coupon_id = ? AND coupon_usages.period = ? RETURNING coupon_usages.coupon_id, coupon_usages.amount, coupon_usages.status, coupon_usages.period")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.Status._set {
		__values = append(__values, update.Status.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
	}

	// Nothing to update: an UPDATE with an empty SET clause is invalid SQL.
	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	__args = append(__args, coupon_usage_coupon_id.value(), coupon_usage_period.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	coupon_usage = &CouponUsage{}
	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&coupon_usage.CouponId, &coupon_usage.Amount, &coupon_usage.Status, &coupon_usage.Period)
	// No matching row is not an error for this method; signal it with nil.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return coupon_usage, nil
}
|
|
|
|
// UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less updates the
// node_api_versions row with the given id, but only when its stored
// api_version is less than the supplied value (a monotonic, race-safe bump).
// updated_at is always refreshed, so there is no empty-update check — the SET
// clause is never empty. The method does not report whether a row matched.
func (obj *pgxcockroachImpl) UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less(ctx context.Context,
	node_api_version_id NodeApiVersion_Id_Field,
	node_api_version_api_version_less NodeApiVersion_ApiVersion_Field,
	update NodeApiVersion_Update_Fields) (
	err error) {
	defer mon.Task()(&ctx)(&err)
	// Hole for the comma-joined "column = ?" SET fragments.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE node_api_versions SET "), __sets, __sqlbundle_Literal(" WHERE node_api_versions.id = ? AND node_api_versions.api_version < ?")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{} // placeholder values, SET first then WHERE
	var __args []interface{}

	if update.ApiVersion._set {
		__values = append(__values, update.ApiVersion.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("api_version = ?"))
	}

	// Auto-maintained column: always bump updated_at via the Hooks clock
	// (overridable in tests), normalized to UTC.
	__now := obj.db.Hooks.Now().UTC()

	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, node_api_version_id.value(), node_api_version_api_version_less.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return obj.makeErr(err)
	}
	return nil
}
|
|
|
|
// Delete_PendingAudits_By_NodeId deletes the pending_audits row with the
// given node_id. deleted reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_PendingAudits_By_NodeId(ctx context.Context,
	pending_audits_node_id PendingAudits_NodeId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?")

	var __values []interface{}
	__values = append(__values, pending_audits_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Irreparabledb_By_Segmentpath deletes the irreparabledbs row with the
// given segmentpath. deleted reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Injuredsegment_By_UpdatedAt_Less deletes all injuredsegments rows
// whose updated_at is before the given time, returning the number of rows
// removed (0 is not an error).
func (obj *pgxcockroachImpl) Delete_Injuredsegment_By_UpdatedAt_Less(ctx context.Context,
	injuredsegment_updated_at_less Injuredsegment_UpdatedAt_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM injuredsegments WHERE injuredsegments.updated_at < ?")

	var __values []interface{}
	__values = append(__values, injuredsegment_updated_at_less.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Delete_User_By_Id deletes the users row with the given id. deleted reports
// whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Project_By_Id deletes the projects row with the given id. deleted
// reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_ProjectMember_By_MemberId_And_ProjectId deletes the project_members
// row identified by (member_id, project_id). deleted reports whether a row
// was actually removed.
func (obj *pgxcockroachImpl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? AND project_members.project_id = ?")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value(), project_member_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_ApiKey_By_Id deletes the api_keys row with the given id. deleted
// reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?")

	var __values []interface{}
	__values = append(__values, api_key_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_ResetPasswordToken_By_Secret deletes the reset_password_tokens row
// with the given secret. deleted reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_ResetPasswordToken_By_Secret(ctx context.Context,
	reset_password_token_secret ResetPasswordToken_Secret_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM reset_password_tokens WHERE reset_password_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, reset_password_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_BucketMetainfo_By_ProjectId_And_Name deletes the bucket_metainfos
// row identified by (project_id, name). deleted reports whether a row was
// actually removed.
func (obj *pgxcockroachImpl) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId deletes all
// graceful_exit_transfer_queue rows for the given node_id, returning the
// number of rows removed (0 is not an error).
func (obj *pgxcockroachImpl) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum deletes
// the graceful_exit_transfer_queue row identified by
// (node_id, path, piece_num). deleted reports whether a row was actually
// removed.
func (obj *pgxcockroachImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.path = ? AND graceful_exit_transfer_queue.piece_num = ?")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value(), graceful_exit_transfer_queue_path.value(), graceful_exit_transfer_queue_piece_num.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null
// deletes all finished (finished_at IS NOT NULL) graceful_exit_transfer_queue
// rows for the given node_id, returning the number of rows removed
// (0 is not an error).
func (obj *pgxcockroachImpl) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
	count int64, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM graceful_exit_transfer_queue WHERE graceful_exit_transfer_queue.node_id = ? AND graceful_exit_transfer_queue.finished_at is not NULL")

	var __values []interface{}
	__values = append(__values, graceful_exit_transfer_queue_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return 0, obj.makeErr(err)
	}

	count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}

	return count, nil

}
|
|
|
|
// Delete_CouponCode_By_Name deletes the coupon_codes row with the given name.
// deleted reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_CouponCode_By_Name(ctx context.Context,
	coupon_code_name CouponCode_Name_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM coupon_codes WHERE coupon_codes.name = ?")

	var __values []interface{}
	__values = append(__values, coupon_code_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
// Delete_Coupon_By_Id deletes the coupons row with the given id. deleted
// reports whether a row was actually removed.
func (obj *pgxcockroachImpl) Delete_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field) (
	deleted bool, err error) {
	defer mon.Task()(&ctx)(&err)

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM coupons WHERE coupons.id = ?")

	var __values []interface{}
	__values = append(__values, coupon_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	// RowsAffected distinguishes "deleted" from "no matching row".
	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}
|
|
|
|
func (impl pgxcockroachImpl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(*pgconn.PgError); ok {
|
|
if e.Code[:2] == "23" {
|
|
return e.ConstraintName, true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *pgxcockroachImpl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
defer mon.Task()(&ctx)(&err)
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM user_credits;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripecoinpayments_apply_balance_intents;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM project_members;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_metainfos;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM api_keys;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM value_attributions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM users;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripecoinpayments_tx_conversion_rates;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripecoinpayments_invoice_project_records;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM stripe_customers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_storage_tallies;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_paystubs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_payments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_bandwidth_rollups_phase2;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_bandwidth_rollup_archives;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM storagenode_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM revocations;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM reset_password_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM registration_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM project_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM projects;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM pending_audits;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM peer_identities;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM offers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM node_api_versions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM injuredsegments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM graceful_exit_transfer_queue;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM graceful_exit_progress;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coupon_usages;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coupon_codes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coupons;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM coinpayments_transactions;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_storage_tallies;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_bandwidth_rollup_archives;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM bucket_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM audit_histories;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM accounting_timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM accounting_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
// Rx bundles a database handle with a lazily opened transaction: methods on
// Rx delegate to the transaction, opening one on first use via getTx.
type Rx struct {
	// db is the owning database handle used to open transactions.
	db *DB

	// tx is the currently open transaction, or nil if none has been
	// started yet (getTx opens it; Commit/Rollback clear it).
	tx *Tx
}
|
|
|
|
func (rx *Rx) UnsafeTx(ctx context.Context) (unsafe_tx tagsql.Tx, err error) {
|
|
tx, err := rx.getTx(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return tx.Tx, nil
|
|
}
|
|
|
|
func (rx *Rx) getTx(ctx context.Context) (tx *Tx, err error) {
|
|
if rx.tx == nil {
|
|
if rx.tx, err = rx.db.Open(ctx); err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
return rx.tx, nil
|
|
}
|
|
|
|
// Rebind rewrites ?-style placeholders in s into the placeholder syntax
// used by the underlying database's dialect.
func (rx *Rx) Rebind(s string) string {
	return rx.db.Rebind(s)
}
|
|
|
|
func (rx *Rx) Commit() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Commit()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) Rollback() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Rollback()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_BucketStorageTally(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
|
rows []*BucketStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id, bucket_storage_tally_bucket_name, bucket_storage_tally_interval_start_greater_or_equal, bucket_storage_tally_interval_start_less_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx, coinpayments_transaction_user_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx, coupon_status)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx, coupon_user_id, coupon_status)
|
|
}
|
|
|
|
func (rx *Rx) All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx, coupon_user_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Node_Id(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
|
|
rows []*Id_PieceCount_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_Project(ctx context.Context) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_ProjectMember_By_MemberId(ctx, project_member_member_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_owner_id Project_OwnerId_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx, project_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx, project_member_member_id)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
|
|
rows []*StoragenodeBandwidthRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodePayment_By_NodeId(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
|
|
rows []*StoragenodePayment, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodePayment_By_NodeId(ctx, storagenode_payment_node_id)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodePayment_By_NodeId_And_Period(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field) (
|
|
rows []*StoragenodePayment, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodePayment_By_NodeId_And_Period(ctx, storagenode_payment_node_id, storagenode_payment_period)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodePaystub_By_NodeId(ctx context.Context,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
|
|
rows []*StoragenodePaystub, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodePaystub_By_NodeId(ctx, storagenode_paystub_node_id)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodeStorageTally(ctx context.Context) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodeStorageTally(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
|
|
rows []*StoragenodeStorageTally, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx, storagenode_storage_tally_interval_end_time_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Count_BucketMetainfo_Name_By_ProjectId(ctx, bucket_metainfo_project_id)
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_AccountingTimestamps(ctx, accounting_timestamps_name, accounting_timestamps_value)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_BucketStorageTally(ctx, bucket_storage_tally_bucket_name, bucket_storage_tally_project_id, bucket_storage_tally_interval_start, bucket_storage_tally_inline, bucket_storage_tally_remote, bucket_storage_tally_remote_segments_count, bucket_storage_tally_inline_segments_count, bucket_storage_tally_object_count, bucket_storage_tally_metadata_size)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_Irreparabledb(ctx, irreparabledb_segmentpath, irreparabledb_segmentdetail, irreparabledb_pieces_lost_count, irreparabledb_seg_damaged_unix_sec, irreparabledb_repair_attempt_count)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_PeerIdentity(ctx, peer_identity_node_id, peer_identity_leaf_serial_number, peer_identity_chain)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_Revocation(ctx context.Context,
|
|
revocation_revoked Revocation_Revoked_Field,
|
|
revocation_api_key_id Revocation_ApiKeyId_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_Revocation(ctx, revocation_revoked, revocation_api_key_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) CreateNoReturn_StoragenodePayment(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field,
|
|
storagenode_payment_amount StoragenodePayment_Amount_Field,
|
|
optional StoragenodePayment_Create_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.CreateNoReturn_StoragenodePayment(ctx, storagenode_payment_node_id, storagenode_payment_period, storagenode_payment_amount, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_head ApiKey_Head_Field,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_secret ApiKey_Secret_Field,
|
|
optional ApiKey_Create_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ApiKey(ctx, api_key_id, api_key_project_id, api_key_head, api_key_name, api_key_secret, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_AuditHistory(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field,
|
|
audit_history_history AuditHistory_History_Field) (
|
|
audit_history *AuditHistory, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_AuditHistory(ctx, audit_history_node_id, audit_history_history)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_BucketMetainfo(ctx, bucket_metainfo_id, bucket_metainfo_project_id, bucket_metainfo_name, bucket_metainfo_path_cipher, bucket_metainfo_default_segment_size, bucket_metainfo_default_encryption_cipher_suite, bucket_metainfo_default_encryption_block_size, bucket_metainfo_default_redundancy_algorithm, bucket_metainfo_default_redundancy_share_size, bucket_metainfo_default_redundancy_required_shares, bucket_metainfo_default_redundancy_repair_shares, bucket_metainfo_default_redundancy_optimal_shares, bucket_metainfo_default_redundancy_total_shares, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_CoinpaymentsTransaction(ctx, coinpayments_transaction_id, coinpayments_transaction_user_id, coinpayments_transaction_address, coinpayments_transaction_amount, coinpayments_transaction_received, coinpayments_transaction_status, coinpayments_transaction_key, coinpayments_transaction_timeout)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Coupon(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_amount Coupon_Amount_Field,
|
|
coupon_description Coupon_Description_Field,
|
|
coupon_type Coupon_Type_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
coupon_duration Coupon_Duration_Field,
|
|
optional Coupon_Create_Fields) (
|
|
coupon *Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Coupon(ctx, coupon_id, coupon_user_id, coupon_amount, coupon_description, coupon_type, coupon_status, coupon_duration, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_CouponCode(ctx context.Context,
|
|
coupon_code_id CouponCode_Id_Field,
|
|
coupon_code_name CouponCode_Name_Field,
|
|
coupon_code_amount CouponCode_Amount_Field,
|
|
coupon_code_description CouponCode_Description_Field,
|
|
coupon_code_type CouponCode_Type_Field,
|
|
optional CouponCode_Create_Fields) (
|
|
coupon_code *CouponCode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_CouponCode(ctx, coupon_code_id, coupon_code_name, coupon_code_amount, coupon_code_description, coupon_code_type, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_CouponUsage(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_amount CouponUsage_Amount_Field,
|
|
coupon_usage_status CouponUsage_Status_Field,
|
|
coupon_usage_period CouponUsage_Period_Field) (
|
|
coupon_usage *CouponUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_CouponUsage(ctx, coupon_usage_coupon_id, coupon_usage_amount, coupon_usage_status, coupon_usage_period)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field,
|
|
project_owner_id Project_OwnerId_Field,
|
|
optional Project_Create_Fields) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Project(ctx, project_id, project_name, project_description, project_owner_id, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ProjectMember(ctx, project_member_member_id, project_member_project_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_RegistrationToken(ctx, registration_token_secret, registration_token_project_limit, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ResetPasswordToken(ctx, reset_password_token_secret, reset_password_token_owner_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StoragenodeBandwidthRollup(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
storagenode_bandwidth_rollup_interval_seconds StoragenodeBandwidthRollup_IntervalSeconds_Field,
|
|
storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field,
|
|
storagenode_bandwidth_rollup_settled StoragenodeBandwidthRollup_Settled_Field,
|
|
optional StoragenodeBandwidthRollup_Create_Fields) (
|
|
storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StoragenodeBandwidthRollup(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start, storagenode_bandwidth_rollup_interval_seconds, storagenode_bandwidth_rollup_action, storagenode_bandwidth_rollup_settled, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripeCustomer(ctx, stripe_customer_user_id, stripe_customer_customer_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripecoinpaymentsInvoiceProjectRecord(ctx, stripecoinpayments_invoice_project_record_id, stripecoinpayments_invoice_project_record_project_id, stripecoinpayments_invoice_project_record_storage, stripecoinpayments_invoice_project_record_egress, stripecoinpayments_invoice_project_record_objects, stripecoinpayments_invoice_project_record_period_start, stripecoinpayments_invoice_project_record_period_end, stripecoinpayments_invoice_project_record_state)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
|
|
stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_StripecoinpaymentsTxConversionRate(ctx, stripecoinpayments_tx_conversion_rate_tx_id, stripecoinpayments_tx_conversion_rate_rate)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_email User_Email_Field,
|
|
user_normalized_email User_NormalizedEmail_Field,
|
|
user_full_name User_FullName_Field,
|
|
user_password_hash User_PasswordHash_Field,
|
|
optional User_Create_Fields) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_User(ctx, user_id, user_email, user_normalized_email, user_full_name, user_password_hash, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ValueAttribution(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field,
|
|
value_attribution_partner_id ValueAttribution_PartnerId_Field) (
|
|
value_attribution *ValueAttribution, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ValueAttribution(ctx, value_attribution_project_id, value_attribution_bucket_name, value_attribution_partner_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ApiKey_By_Id(ctx, api_key_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
|
}
|
|
|
|
func (rx *Rx) Delete_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_CouponCode_By_Name(ctx, coupon_code_name)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Coupon_By_Id(ctx, coupon_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitTransferQueue_By_NodeId(ctx, graceful_exit_transfer_queue_node_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx, graceful_exit_transfer_queue_node_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Injuredsegment_By_UpdatedAt_Less(ctx context.Context,
|
|
injuredsegment_updated_at_less Injuredsegment_UpdatedAt_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Injuredsegment_By_UpdatedAt_Less(ctx, injuredsegment_updated_at_less)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Delete_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_PendingAudits_By_NodeId(ctx, pending_audits_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ProjectMember_By_MemberId_And_ProjectId(ctx, project_member_member_id, project_member_project_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Project_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ResetPasswordToken_By_Secret(ctx, reset_password_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_User_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_AccountingTimestamps_Value_By_Name(ctx, accounting_timestamps_name)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Head(ctx context.Context,
|
|
api_key_head ApiKey_Head_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Head(ctx, api_key_head)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Id(ctx, api_key_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Name_And_ProjectId(ctx, api_key_name, api_key_project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_AuditHistory_By_NodeId(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field) (
|
|
audit_history *AuditHistory, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_AuditHistory_By_NodeId(ctx, audit_history_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
|
}
|
|
|
|
func (rx *Rx) Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
row *Id_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
|
}
|
|
|
|
func (rx *Rx) Get_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
coupon_code *CouponCode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_CouponCode_By_Name(ctx, coupon_code_name)
|
|
}
|
|
|
|
func (rx *Rx) Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Coupon_By_Id(ctx, coupon_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_GracefulExitProgress_By_NodeId(ctx, graceful_exit_progress_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num)
|
|
}
|
|
|
|
func (rx *Rx) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
peer_identity *PeerIdentity, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_PeerIdentity_By_NodeId(ctx, peer_identity_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
row *LeafSerialNumber_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx, peer_identity_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
pending_audits *PendingAudits, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_PendingAudits_By_NodeId(ctx, pending_audits_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_BandwidthLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *BandwidthLimit_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_BandwidthLimit_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *BandwidthLimit_UsageLimit_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_MaxBuckets_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *MaxBuckets_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_MaxBuckets_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *UsageLimit_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_UsageLimit_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_RegistrationToken_By_OwnerId(ctx, registration_token_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_RegistrationToken_By_Secret(ctx, registration_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ResetPasswordToken_By_OwnerId(ctx, reset_password_token_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
reset_password_token *ResetPasswordToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ResetPasswordToken_By_Secret(ctx, reset_password_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
|
|
storagenode_paystub_period StoragenodePaystub_Period_Field) (
|
|
storagenode_paystub *StoragenodePaystub, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StoragenodePaystub_By_NodeId_And_Period(ctx, storagenode_paystub_node_id, storagenode_paystub_period)
|
|
}
|
|
|
|
func (rx *Rx) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StripeCustomer_CustomerId_By_UserId(ctx, stripe_customer_user_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx, stripecoinpayments_invoice_project_record_project_id, stripecoinpayments_invoice_project_record_period_start, stripecoinpayments_invoice_project_record_period_end)
|
|
}
|
|
|
|
func (rx *Rx) Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx, stripecoinpayments_tx_conversion_rate_tx_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
|
|
user_normalized_email User_NormalizedEmail_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx, user_normalized_email)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_ProjectLimit_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
row *ProjectLimit_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_ProjectLimit_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
value_attribution *ValueAttribution, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ValueAttribution_By_ProjectId_And_BucketName(ctx, value_attribution_project_id, value_attribution_bucket_name)
|
|
}
|
|
|
|
func (rx *Rx) Has_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
has bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Has_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name)
|
|
}
|
|
|
|
func (rx *Rx) Has_NodeApiVersion_By_Id_And_ApiVersion_GreaterOrEqual(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version_greater_or_equal NodeApiVersion_ApiVersion_Field) (
|
|
has bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Has_NodeApiVersion_By_Id_And_ApiVersion_GreaterOrEqual(ctx, node_api_version_id, node_api_version_api_version_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name_greater BucketMetainfo_Name_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name_greater, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_CouponUsage_By_Period_And_Status_Equal_Number(ctx context.Context,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*CouponUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_CouponUsage_By_Period_And_Status_Equal_Number(ctx, coupon_usage_period, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
limit int, offset int64) (
|
|
rows []*Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx, coupon_created_at_less_or_equal, coupon_status, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx, irreparabledb_segmentpath_greater, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field,
|
|
limit int, offset int64) (
|
|
rows []*StoragenodePayment, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx, storagenode_payment_node_id, storagenode_payment_period, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripeCustomer, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx, stripe_customer_created_at_less_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_StripecoinpaymentsInvoiceProjectRecord_By_PeriodStart_And_PeriodEnd_And_State(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
|
|
limit int, offset int64) (
|
|
rows []*StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_StripecoinpaymentsInvoiceProjectRecord_By_PeriodStart_And_PeriodEnd_And_State(ctx, stripecoinpayments_invoice_project_record_period_start, stripecoinpayments_invoice_project_record_period_end, stripecoinpayments_invoice_project_record_state, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
|
|
limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx, bucket_bandwidth_rollup_archive_interval_start_greater_or_equal, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx, bucket_bandwidth_rollup_interval_start_greater_or_equal, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) Paged_Node(ctx context.Context,
|
|
limit int, start *Paged_Node_Continuation) (
|
|
rows []*Node, next *Paged_Node_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_Node(ctx, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal StoragenodeBandwidthRollupArchive_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollupArchive, next *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx, storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_phase2_storagenode_id StoragenodeBandwidthRollupPhase2_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal StoragenodeBandwidthRollupPhase2_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollupPhase2, next *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx, storagenode_bandwidth_rollup_phase2_storagenode_id, storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx, storagenode_bandwidth_rollup_interval_start_greater_or_equal, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
limit int, start *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
|
|
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start_greater_or_equal, limit, start)
|
|
}
|
|
|
|
func (rx *Rx) ReplaceNoReturn_NodeApiVersion(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version NodeApiVersion_ApiVersion_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.ReplaceNoReturn_NodeApiVersion(ctx, node_api_version_id, node_api_version_api_version)
|
|
|
|
}
|
|
|
|
func (rx *Rx) ReplaceNoReturn_StoragenodePaystub(ctx context.Context,
|
|
storagenode_paystub_period StoragenodePaystub_Period_Field,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
|
|
storagenode_paystub_codes StoragenodePaystub_Codes_Field,
|
|
storagenode_paystub_usage_at_rest StoragenodePaystub_UsageAtRest_Field,
|
|
storagenode_paystub_usage_get StoragenodePaystub_UsageGet_Field,
|
|
storagenode_paystub_usage_put StoragenodePaystub_UsagePut_Field,
|
|
storagenode_paystub_usage_get_repair StoragenodePaystub_UsageGetRepair_Field,
|
|
storagenode_paystub_usage_put_repair StoragenodePaystub_UsagePutRepair_Field,
|
|
storagenode_paystub_usage_get_audit StoragenodePaystub_UsageGetAudit_Field,
|
|
storagenode_paystub_comp_at_rest StoragenodePaystub_CompAtRest_Field,
|
|
storagenode_paystub_comp_get StoragenodePaystub_CompGet_Field,
|
|
storagenode_paystub_comp_put StoragenodePaystub_CompPut_Field,
|
|
storagenode_paystub_comp_get_repair StoragenodePaystub_CompGetRepair_Field,
|
|
storagenode_paystub_comp_put_repair StoragenodePaystub_CompPutRepair_Field,
|
|
storagenode_paystub_comp_get_audit StoragenodePaystub_CompGetAudit_Field,
|
|
storagenode_paystub_surge_percent StoragenodePaystub_SurgePercent_Field,
|
|
storagenode_paystub_held StoragenodePaystub_Held_Field,
|
|
storagenode_paystub_owed StoragenodePaystub_Owed_Field,
|
|
storagenode_paystub_disposed StoragenodePaystub_Disposed_Field,
|
|
storagenode_paystub_paid StoragenodePaystub_Paid_Field,
|
|
storagenode_paystub_distributed StoragenodePaystub_Distributed_Field) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.ReplaceNoReturn_StoragenodePaystub(ctx, storagenode_paystub_period, storagenode_paystub_node_id, storagenode_paystub_codes, storagenode_paystub_usage_at_rest, storagenode_paystub_usage_get, storagenode_paystub_usage_put, storagenode_paystub_usage_get_repair, storagenode_paystub_usage_put_repair, storagenode_paystub_usage_get_audit, storagenode_paystub_comp_at_rest, storagenode_paystub_comp_get, storagenode_paystub_comp_put, storagenode_paystub_comp_get_repair, storagenode_paystub_comp_put_repair, storagenode_paystub_comp_get_audit, storagenode_paystub_surge_percent, storagenode_paystub_held, storagenode_paystub_owed, storagenode_paystub_disposed, storagenode_paystub_paid, storagenode_paystub_distributed)
|
|
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_AccountingTimestamps_By_Name(ctx, accounting_timestamps_name, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_ApiKey_By_Id(ctx, api_key_id, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
|
|
update GracefulExitTransferQueue_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx, graceful_exit_transfer_queue_node_id, graceful_exit_transfer_queue_path, graceful_exit_transfer_queue_piece_num, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version_less NodeApiVersion_ApiVersion_Field,
|
|
update NodeApiVersion_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less(ctx, node_api_version_id, node_api_version_api_version_less, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_Node_By_Id(ctx, node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
update PeerIdentity_Update_Fields) (
|
|
err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.UpdateNoReturn_PeerIdentity_By_NodeId(ctx, peer_identity_node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_AuditHistory_By_NodeId(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field,
|
|
update AuditHistory_Update_Fields) (
|
|
audit_history *AuditHistory, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_AuditHistory_By_NodeId(ctx, audit_history_node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
update BucketMetainfo_Update_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_BucketMetainfo_By_ProjectId_And_Name(ctx, bucket_metainfo_project_id, bucket_metainfo_name, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
update CoinpaymentsTransaction_Update_Fields) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_CoinpaymentsTransaction_By_Id(ctx, coinpayments_transaction_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_period CouponUsage_Period_Field,
|
|
update CouponUsage_Update_Fields) (
|
|
coupon_usage *CouponUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_CouponUsage_By_CouponId_And_Period(ctx, coupon_usage_coupon_id, coupon_usage_period, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
update Coupon_Update_Fields) (
|
|
coupon *Coupon, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Coupon_By_Id(ctx, coupon_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Node_By_Id(ctx, node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Project_By_Id(ctx, project_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_RegistrationToken_By_Secret(ctx, registration_token_secret, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx, stripecoinpayments_invoice_project_record_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_User_By_Id(ctx, user_id, update)
|
|
}
|
|
|
|
type Methods interface {
|
|
All_BucketStorageTally(ctx context.Context) (
|
|
rows []*BucketStorageTally, err error)
|
|
|
|
All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
|
|
rows []*BucketStorageTally, err error)
|
|
|
|
All_CoinpaymentsTransaction_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field) (
|
|
rows []*CoinpaymentsTransaction, err error)
|
|
|
|
All_Coupon_By_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Coupon_By_UserId_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_status Coupon_Status_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Coupon_By_UserId_OrderBy_Desc_CreatedAt(ctx context.Context,
|
|
coupon_user_id Coupon_UserId_Field) (
|
|
rows []*Coupon, err error)
|
|
|
|
All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error)
|
|
|
|
All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
|
|
rows []*Id_PieceCount_Row, err error)
|
|
|
|
All_Project(ctx context.Context) (
|
|
rows []*Project, err error)
|
|
|
|
All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error)
|
|
|
|
All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_created_at_less Project_CreatedAt_Field) (
|
|
rows []*Project, err error)
|
|
|
|
All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
|
|
project_owner_id Project_OwnerId_Field) (
|
|
rows []*Project, err error)
|
|
|
|
All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error)
|
|
|
|
All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
|
|
rows []*StoragenodeBandwidthRollup, err error)
|
|
|
|
All_StoragenodePayment_By_NodeId(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
|
|
rows []*StoragenodePayment, err error)
|
|
|
|
All_StoragenodePayment_By_NodeId_And_Period(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field) (
|
|
rows []*StoragenodePayment, err error)
|
|
|
|
All_StoragenodePaystub_By_NodeId(ctx context.Context,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
|
|
rows []*StoragenodePaystub, err error)
|
|
|
|
All_StoragenodeStorageTally(ctx context.Context) (
|
|
rows []*StoragenodeStorageTally, err error)
|
|
|
|
All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
storagenode_storage_tally_interval_end_time_greater_or_equal StoragenodeStorageTally_IntervalEndTime_Field) (
|
|
rows []*StoragenodeStorageTally, err error)
|
|
|
|
Count_BucketMetainfo_Name_By_ProjectId(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field) (
|
|
count int64, err error)
|
|
|
|
CreateNoReturn_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_BucketStorageTally(ctx context.Context,
|
|
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
|
|
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
|
|
bucket_storage_tally_interval_start BucketStorageTally_IntervalStart_Field,
|
|
bucket_storage_tally_inline BucketStorageTally_Inline_Field,
|
|
bucket_storage_tally_remote BucketStorageTally_Remote_Field,
|
|
bucket_storage_tally_remote_segments_count BucketStorageTally_RemoteSegmentsCount_Field,
|
|
bucket_storage_tally_inline_segments_count BucketStorageTally_InlineSegmentsCount_Field,
|
|
bucket_storage_tally_object_count BucketStorageTally_ObjectCount_Field,
|
|
bucket_storage_tally_metadata_size BucketStorageTally_MetadataSize_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_PeerIdentity(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field,
|
|
peer_identity_leaf_serial_number PeerIdentity_LeafSerialNumber_Field,
|
|
peer_identity_chain PeerIdentity_Chain_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_Revocation(ctx context.Context,
|
|
revocation_revoked Revocation_Revoked_Field,
|
|
revocation_api_key_id Revocation_ApiKeyId_Field) (
|
|
err error)
|
|
|
|
CreateNoReturn_StoragenodePayment(ctx context.Context,
|
|
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
|
storagenode_payment_period StoragenodePayment_Period_Field,
|
|
storagenode_payment_amount StoragenodePayment_Amount_Field,
|
|
optional StoragenodePayment_Create_Fields) (
|
|
err error)
|
|
|
|
Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_head ApiKey_Head_Field,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_secret ApiKey_Secret_Field,
|
|
optional ApiKey_Create_Fields) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Create_AuditHistory(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field,
|
|
audit_history_history AuditHistory_History_Field) (
|
|
audit_history *AuditHistory, err error)
|
|
|
|
Create_BucketMetainfo(ctx context.Context,
|
|
bucket_metainfo_id BucketMetainfo_Id_Field,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field,
|
|
bucket_metainfo_path_cipher BucketMetainfo_PathCipher_Field,
|
|
bucket_metainfo_default_segment_size BucketMetainfo_DefaultSegmentSize_Field,
|
|
bucket_metainfo_default_encryption_cipher_suite BucketMetainfo_DefaultEncryptionCipherSuite_Field,
|
|
bucket_metainfo_default_encryption_block_size BucketMetainfo_DefaultEncryptionBlockSize_Field,
|
|
bucket_metainfo_default_redundancy_algorithm BucketMetainfo_DefaultRedundancyAlgorithm_Field,
|
|
bucket_metainfo_default_redundancy_share_size BucketMetainfo_DefaultRedundancyShareSize_Field,
|
|
bucket_metainfo_default_redundancy_required_shares BucketMetainfo_DefaultRedundancyRequiredShares_Field,
|
|
bucket_metainfo_default_redundancy_repair_shares BucketMetainfo_DefaultRedundancyRepairShares_Field,
|
|
bucket_metainfo_default_redundancy_optimal_shares BucketMetainfo_DefaultRedundancyOptimalShares_Field,
|
|
bucket_metainfo_default_redundancy_total_shares BucketMetainfo_DefaultRedundancyTotalShares_Field,
|
|
optional BucketMetainfo_Create_Fields) (
|
|
bucket_metainfo *BucketMetainfo, err error)
|
|
|
|
Create_CoinpaymentsTransaction(ctx context.Context,
|
|
coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
|
|
coinpayments_transaction_user_id CoinpaymentsTransaction_UserId_Field,
|
|
coinpayments_transaction_address CoinpaymentsTransaction_Address_Field,
|
|
coinpayments_transaction_amount CoinpaymentsTransaction_Amount_Field,
|
|
coinpayments_transaction_received CoinpaymentsTransaction_Received_Field,
|
|
coinpayments_transaction_status CoinpaymentsTransaction_Status_Field,
|
|
coinpayments_transaction_key CoinpaymentsTransaction_Key_Field,
|
|
coinpayments_transaction_timeout CoinpaymentsTransaction_Timeout_Field) (
|
|
coinpayments_transaction *CoinpaymentsTransaction, err error)
|
|
|
|
Create_Coupon(ctx context.Context,
|
|
coupon_id Coupon_Id_Field,
|
|
coupon_user_id Coupon_UserId_Field,
|
|
coupon_amount Coupon_Amount_Field,
|
|
coupon_description Coupon_Description_Field,
|
|
coupon_type Coupon_Type_Field,
|
|
coupon_status Coupon_Status_Field,
|
|
coupon_duration Coupon_Duration_Field,
|
|
optional Coupon_Create_Fields) (
|
|
coupon *Coupon, err error)
|
|
|
|
Create_CouponCode(ctx context.Context,
|
|
coupon_code_id CouponCode_Id_Field,
|
|
coupon_code_name CouponCode_Name_Field,
|
|
coupon_code_amount CouponCode_Amount_Field,
|
|
coupon_code_description CouponCode_Description_Field,
|
|
coupon_code_type CouponCode_Type_Field,
|
|
optional CouponCode_Create_Fields) (
|
|
coupon_code *CouponCode, err error)
|
|
|
|
Create_CouponUsage(ctx context.Context,
|
|
coupon_usage_coupon_id CouponUsage_CouponId_Field,
|
|
coupon_usage_amount CouponUsage_Amount_Field,
|
|
coupon_usage_status CouponUsage_Status_Field,
|
|
coupon_usage_period CouponUsage_Period_Field) (
|
|
coupon_usage *CouponUsage, err error)
|
|
|
|
Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field,
|
|
project_owner_id Project_OwnerId_Field,
|
|
optional Project_Create_Fields) (
|
|
project *Project, err error)
|
|
|
|
Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error)
|
|
|
|
Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Create_ResetPasswordToken(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error)
|
|
|
|
Create_StoragenodeBandwidthRollup(ctx context.Context,
|
|
storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
|
|
storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,
|
|
storagenode_bandwidth_rollup_interval_seconds StoragenodeBandwidthRollup_IntervalSeconds_Field,
|
|
storagenode_bandwidth_rollup_action StoragenodeBandwidthRollup_Action_Field,
|
|
storagenode_bandwidth_rollup_settled StoragenodeBandwidthRollup_Settled_Field,
|
|
optional StoragenodeBandwidthRollup_Create_Fields) (
|
|
storagenode_bandwidth_rollup *StoragenodeBandwidthRollup, err error)
|
|
|
|
Create_StripeCustomer(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field,
|
|
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
|
|
stripe_customer *StripeCustomer, err error)
|
|
|
|
Create_StripecoinpaymentsInvoiceProjectRecord(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_storage StripecoinpaymentsInvoiceProjectRecord_Storage_Field,
|
|
stripecoinpayments_invoice_project_record_egress StripecoinpaymentsInvoiceProjectRecord_Egress_Field,
|
|
stripecoinpayments_invoice_project_record_objects StripecoinpaymentsInvoiceProjectRecord_Objects_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
|
|
stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error)
|
|
|
|
Create_StripecoinpaymentsTxConversionRate(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field,
|
|
stripecoinpayments_tx_conversion_rate_rate StripecoinpaymentsTxConversionRate_Rate_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error)
|
|
|
|
Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_email User_Email_Field,
|
|
user_normalized_email User_NormalizedEmail_Field,
|
|
user_full_name User_FullName_Field,
|
|
user_password_hash User_PasswordHash_Field,
|
|
optional User_Create_Fields) (
|
|
user *User, err error)
|
|
|
|
Create_ValueAttribution(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field,
|
|
value_attribution_partner_id ValueAttribution_PartnerId_Field) (
|
|
value_attribution *ValueAttribution, err error)
|
|
|
|
Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_GracefulExitTransferQueue_By_NodeId(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_GracefulExitTransferQueue_By_NodeId_And_FinishedAt_IsNot_Null(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Injuredsegment_By_UpdatedAt_Less(ctx context.Context,
|
|
injuredsegment_updated_at_less Injuredsegment_UpdatedAt_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error)
|
|
|
|
Get_ApiKey_By_Head(ctx context.Context,
|
|
api_key_head ApiKey_Head_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_ApiKey_By_Name_And_ProjectId(ctx context.Context,
|
|
api_key_name ApiKey_Name_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_AuditHistory_By_NodeId(ctx context.Context,
|
|
audit_history_node_id AuditHistory_NodeId_Field) (
|
|
audit_history *AuditHistory, err error)
|
|
|
|
Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
bucket_metainfo *BucketMetainfo, err error)
|
|
|
|
Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
row *Id_Row, err error)
|
|
|
|
Get_CouponCode_By_Name(ctx context.Context,
|
|
coupon_code_name CouponCode_Name_Field) (
|
|
coupon_code *CouponCode, err error)
|
|
|
|
Get_Coupon_By_Id(ctx context.Context,
|
|
coupon_id Coupon_Id_Field) (
|
|
coupon *Coupon, err error)
|
|
|
|
Get_GracefulExitProgress_By_NodeId(ctx context.Context,
|
|
graceful_exit_progress_node_id GracefulExitProgress_NodeId_Field) (
|
|
graceful_exit_progress *GracefulExitProgress, err error)
|
|
|
|
Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
|
|
graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
|
|
graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
|
|
graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field) (
|
|
graceful_exit_transfer_queue *GracefulExitTransferQueue, err error)
|
|
|
|
Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error)
|
|
|
|
Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error)
|
|
|
|
Get_PeerIdentity_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
peer_identity *PeerIdentity, err error)
|
|
|
|
Get_PeerIdentity_LeafSerialNumber_By_NodeId(ctx context.Context,
|
|
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
|
row *LeafSerialNumber_Row, err error)
|
|
|
|
Get_PendingAudits_By_NodeId(ctx context.Context,
|
|
pending_audits_node_id PendingAudits_NodeId_Field) (
|
|
pending_audits *PendingAudits, err error)
|
|
|
|
Get_Project_BandwidthLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *BandwidthLimit_Row, err error)
|
|
|
|
Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *BandwidthLimit_UsageLimit_Row, err error)
|
|
|
|
Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error)
|
|
|
|
Get_Project_MaxBuckets_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *MaxBuckets_Row, err error)
|
|
|
|
Get_Project_UsageLimit_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
row *UsageLimit_Row, err error)
|
|
|
|
Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Get_ResetPasswordToken_By_OwnerId(ctx context.Context,
|
|
reset_password_token_owner_id ResetPasswordToken_OwnerId_Field) (
|
|
reset_password_token *ResetPasswordToken, err error)
|
|
|
|
Get_ResetPasswordToken_By_Secret(ctx context.Context,
|
|
reset_password_token_secret ResetPasswordToken_Secret_Field) (
|
|
reset_password_token *ResetPasswordToken, err error)
|
|
|
|
Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
|
|
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
|
|
storagenode_paystub_period StoragenodePaystub_Period_Field) (
|
|
storagenode_paystub *StoragenodePaystub, err error)
|
|
|
|
Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
|
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
|
row *CustomerId_Row, err error)
|
|
|
|
Get_StripecoinpaymentsInvoiceProjectRecord_By_ProjectId_And_PeriodStart_And_PeriodEnd(ctx context.Context,
|
|
stripecoinpayments_invoice_project_record_project_id StripecoinpaymentsInvoiceProjectRecord_ProjectId_Field,
|
|
stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
|
|
stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field) (
|
|
stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error)
|
|
|
|
Get_StripecoinpaymentsTxConversionRate_By_TxId(ctx context.Context,
|
|
stripecoinpayments_tx_conversion_rate_tx_id StripecoinpaymentsTxConversionRate_TxId_Field) (
|
|
stripecoinpayments_tx_conversion_rate *StripecoinpaymentsTxConversionRate, err error)
|
|
|
|
Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error)
|
|
|
|
Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx context.Context,
|
|
user_normalized_email User_NormalizedEmail_Field) (
|
|
user *User, err error)
|
|
|
|
Get_User_ProjectLimit_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
row *ProjectLimit_Row, err error)
|
|
|
|
Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
|
|
value_attribution_project_id ValueAttribution_ProjectId_Field,
|
|
value_attribution_bucket_name ValueAttribution_BucketName_Field) (
|
|
value_attribution *ValueAttribution, err error)
|
|
|
|
Has_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
|
|
bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
|
|
bucket_metainfo_name BucketMetainfo_Name_Field) (
|
|
has bool, err error)
|
|
|
|
Has_NodeApiVersion_By_Id_And_ApiVersion_GreaterOrEqual(ctx context.Context,
|
|
node_api_version_id NodeApiVersion_Id_Field,
|
|
node_api_version_api_version_greater_or_equal NodeApiVersion_ApiVersion_Field) (
|
|
has bool, err error)
|
|
|
|
// Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name
// lists buckets of a project whose name is >= the given bound, ordered by
// name ascending, returning at most limit rows starting at offset.
Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater_or_equal BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error)

// Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name is
// like the GreaterOrEqual variant but excludes the bucket whose name
// equals the bound.
Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy_Asc_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name_greater BucketMetainfo_Name_Field,
	limit int, offset int64) (
	rows []*BucketMetainfo, err error)

// Limited_CouponUsage_By_Period_And_Status_Equal_Number pages coupon
// usages for the given period whose status equals a literal baked into
// the generated query.
Limited_CouponUsage_By_Period_And_Status_Equal_Number(ctx context.Context,
	coupon_usage_period CouponUsage_Period_Field,
	limit int, offset int64) (
	rows []*CouponUsage, err error)

// Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt
// pages coupons created at or before the given time with the given
// status, newest first.
Limited_Coupon_By_CreatedAt_LessOrEqual_And_Status_OrderBy_Desc_CreatedAt(ctx context.Context,
	coupon_created_at_less_or_equal Coupon_CreatedAt_Field,
	coupon_status Coupon_Status_Field,
	limit int, offset int64) (
	rows []*Coupon, err error)

// Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath
// pages irreparable segments with paths strictly after the given one,
// ascending by path.
Limited_Irreparabledb_By_Segmentpath_Greater_OrderBy_Asc_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath_greater Irreparabledb_Segmentpath_Field,
	limit int, offset int64) (
	rows []*Irreparabledb, err error)

// Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt pages projects
// created strictly before the given time, oldest first.
Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
	project_created_at_less Project_CreatedAt_Field,
	limit int, offset int64) (
	rows []*Project, err error)

// Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id pages
// payments for a storage node in a period, by descending id.
Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
	storagenode_payment_node_id StoragenodePayment_NodeId_Field,
	storagenode_payment_period StoragenodePayment_Period_Field,
	limit int, offset int64) (
	rows []*StoragenodePayment, err error)

// Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt
// pages stripe customers created at or before the given time, newest
// first.
Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
	stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
	limit int, offset int64) (
	rows []*StripeCustomer, err error)

// Limited_StripecoinpaymentsInvoiceProjectRecord_By_PeriodStart_And_PeriodEnd_And_State
// pages invoice project records matching the given billing period bounds
// and state.
Limited_StripecoinpaymentsInvoiceProjectRecord_By_PeriodStart_And_PeriodEnd_And_State(ctx context.Context,
	stripecoinpayments_invoice_project_record_period_start StripecoinpaymentsInvoiceProjectRecord_PeriodStart_Field,
	stripecoinpayments_invoice_project_record_period_end StripecoinpaymentsInvoiceProjectRecord_PeriodEnd_Field,
	stripecoinpayments_invoice_project_record_state StripecoinpaymentsInvoiceProjectRecord_State_Field,
	limit int, offset int64) (
	rows []*StripecoinpaymentsInvoiceProjectRecord, err error)

// Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual walks
// archived bucket bandwidth rollups with interval start >= the given time,
// at most limit rows per call; pass the returned continuation back in to
// resume the walk.
Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	bucket_bandwidth_rollup_archive_interval_start_greater_or_equal BucketBandwidthRollupArchive_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollupArchive, next *Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error)

// Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual walks live
// bucket bandwidth rollups with interval start >= the given time.
Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	bucket_bandwidth_rollup_interval_start_greater_or_equal BucketBandwidthRollup_IntervalStart_Field,
	limit int, start *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*BucketBandwidthRollup, next *Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error)

// Paged_Node walks all node rows, limit rows per call.
Paged_Node(ctx context.Context,
	limit int, start *Paged_Node_Continuation) (
	rows []*Node, next *Paged_Node_Continuation, err error)

// Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual
// walks archived storage node bandwidth rollups with interval start >= the
// given time.
Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_archive_interval_start_greater_or_equal StoragenodeBandwidthRollupArchive_IntervalStart_Field,
	limit int, start *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*StoragenodeBandwidthRollupArchive, next *Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation, err error)

// Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual
// walks phase2 rollups for one storage node with interval start >= the
// given time.
Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_phase2_storagenode_id StoragenodeBandwidthRollupPhase2_StoragenodeId_Field,
	storagenode_bandwidth_rollup_phase2_interval_start_greater_or_equal StoragenodeBandwidthRollupPhase2_IntervalStart_Field,
	limit int, start *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*StoragenodeBandwidthRollupPhase2, next *Paged_StoragenodeBandwidthRollupPhase2_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error)

// Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual walks
// storage node bandwidth rollups across all nodes with interval start >=
// the given time.
Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
	limit int, start *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation, err error)

// Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual
// walks bandwidth rollups for one storage node with interval start >= the
// given time.
Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start_greater_or_equal StoragenodeBandwidthRollup_IntervalStart_Field,
	limit int, start *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
	rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error)

// ReplaceNoReturn_NodeApiVersion replaces (upsert-style) the api version
// row for a node without reading the resulting row back.
ReplaceNoReturn_NodeApiVersion(ctx context.Context,
	node_api_version_id NodeApiVersion_Id_Field,
	node_api_version_api_version NodeApiVersion_ApiVersion_Field) (
	err error)

// ReplaceNoReturn_StoragenodePaystub replaces (upsert-style) a full
// storage node paystub row — usage, compensation, and payout columns —
// without reading the resulting row back.
ReplaceNoReturn_StoragenodePaystub(ctx context.Context,
	storagenode_paystub_period StoragenodePaystub_Period_Field,
	storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
	storagenode_paystub_codes StoragenodePaystub_Codes_Field,
	storagenode_paystub_usage_at_rest StoragenodePaystub_UsageAtRest_Field,
	storagenode_paystub_usage_get StoragenodePaystub_UsageGet_Field,
	storagenode_paystub_usage_put StoragenodePaystub_UsagePut_Field,
	storagenode_paystub_usage_get_repair StoragenodePaystub_UsageGetRepair_Field,
	storagenode_paystub_usage_put_repair StoragenodePaystub_UsagePutRepair_Field,
	storagenode_paystub_usage_get_audit StoragenodePaystub_UsageGetAudit_Field,
	storagenode_paystub_comp_at_rest StoragenodePaystub_CompAtRest_Field,
	storagenode_paystub_comp_get StoragenodePaystub_CompGet_Field,
	storagenode_paystub_comp_put StoragenodePaystub_CompPut_Field,
	storagenode_paystub_comp_get_repair StoragenodePaystub_CompGetRepair_Field,
	storagenode_paystub_comp_put_repair StoragenodePaystub_CompPutRepair_Field,
	storagenode_paystub_comp_get_audit StoragenodePaystub_CompGetAudit_Field,
	storagenode_paystub_surge_percent StoragenodePaystub_SurgePercent_Field,
	storagenode_paystub_held StoragenodePaystub_Held_Field,
	storagenode_paystub_owed StoragenodePaystub_Owed_Field,
	storagenode_paystub_disposed StoragenodePaystub_Disposed_Field,
	storagenode_paystub_paid StoragenodePaystub_Paid_Field,
	storagenode_paystub_distributed StoragenodePaystub_Distributed_Field) (
	err error)

// UpdateNoReturn_AccountingTimestamps_By_Name applies the given updates to
// the accounting timestamp row with the given name, discarding the result.
UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	update AccountingTimestamps_Update_Fields) (
	err error)

// UpdateNoReturn_ApiKey_By_Id applies the given updates to the api key
// row with the given id, discarding the result.
UpdateNoReturn_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	update ApiKey_Update_Fields) (
	err error)

// UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum
// applies the given updates to the transfer queue entry keyed by node id,
// path, and piece number, discarding the result.
UpdateNoReturn_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNum(ctx context.Context,
	graceful_exit_transfer_queue_node_id GracefulExitTransferQueue_NodeId_Field,
	graceful_exit_transfer_queue_path GracefulExitTransferQueue_Path_Field,
	graceful_exit_transfer_queue_piece_num GracefulExitTransferQueue_PieceNum_Field,
	update GracefulExitTransferQueue_Update_Fields) (
	err error)

// UpdateNoReturn_Irreparabledb_By_Segmentpath applies the given updates to
// the irreparable segment row with the given path, discarding the result.
UpdateNoReturn_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	update Irreparabledb_Update_Fields) (
	err error)

// UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less applies the
// given updates to the node api version row with the given id, but only
// when its api version is below the supplied bound; the result is
// discarded.
UpdateNoReturn_NodeApiVersion_By_Id_And_ApiVersion_Less(ctx context.Context,
	node_api_version_id NodeApiVersion_Id_Field,
	node_api_version_api_version_less NodeApiVersion_ApiVersion_Field,
	update NodeApiVersion_Update_Fields) (
	err error)

// UpdateNoReturn_Node_By_Id applies the given updates to the node row
// with the given id, discarding the result.
UpdateNoReturn_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	err error)

// UpdateNoReturn_PeerIdentity_By_NodeId applies the given updates to the
// peer identity row with the given node id, discarding the result.
UpdateNoReturn_PeerIdentity_By_NodeId(ctx context.Context,
	peer_identity_node_id PeerIdentity_NodeId_Field,
	update PeerIdentity_Update_Fields) (
	err error)

// Update_AuditHistory_By_NodeId applies the given updates to the audit
// history row with the given node id and returns the updated row.
Update_AuditHistory_By_NodeId(ctx context.Context,
	audit_history_node_id AuditHistory_NodeId_Field,
	update AuditHistory_Update_Fields) (
	audit_history *AuditHistory, err error)

// Update_BucketMetainfo_By_ProjectId_And_Name applies the given updates to
// the bucket keyed by project id and name and returns the updated row.
Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context,
	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
	bucket_metainfo_name BucketMetainfo_Name_Field,
	update BucketMetainfo_Update_Fields) (
	bucket_metainfo *BucketMetainfo, err error)

// Update_CoinpaymentsTransaction_By_Id applies the given updates to the
// coin payments transaction with the given id and returns the updated row.
Update_CoinpaymentsTransaction_By_Id(ctx context.Context,
	coinpayments_transaction_id CoinpaymentsTransaction_Id_Field,
	update CoinpaymentsTransaction_Update_Fields) (
	coinpayments_transaction *CoinpaymentsTransaction, err error)

// Update_CouponUsage_By_CouponId_And_Period applies the given updates to
// the coupon usage keyed by coupon id and period and returns the updated
// row.
Update_CouponUsage_By_CouponId_And_Period(ctx context.Context,
	coupon_usage_coupon_id CouponUsage_CouponId_Field,
	coupon_usage_period CouponUsage_Period_Field,
	update CouponUsage_Update_Fields) (
	coupon_usage *CouponUsage, err error)

// Update_Coupon_By_Id applies the given updates to the coupon with the
// given id and returns the updated row.
Update_Coupon_By_Id(ctx context.Context,
	coupon_id Coupon_Id_Field,
	update Coupon_Update_Fields) (
	coupon *Coupon, err error)

// Update_Node_By_Id applies the given updates to the node with the given
// id and returns the updated row.
Update_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	node *Node, err error)

// Update_Project_By_Id applies the given updates to the project with the
// given id and returns the updated row.
Update_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field,
	update Project_Update_Fields) (
	project *Project, err error)

// Update_RegistrationToken_By_Secret applies the given updates to the
// registration token with the given secret and returns the updated row.
Update_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	update RegistrationToken_Update_Fields) (
	registration_token *RegistrationToken, err error)

// Update_StripecoinpaymentsInvoiceProjectRecord_By_Id applies the given
// updates to the invoice project record with the given id and returns the
// updated row.
Update_StripecoinpaymentsInvoiceProjectRecord_By_Id(ctx context.Context,
	stripecoinpayments_invoice_project_record_id StripecoinpaymentsInvoiceProjectRecord_Id_Field,
	update StripecoinpaymentsInvoiceProjectRecord_Update_Fields) (
	stripecoinpayments_invoice_project_record *StripecoinpaymentsInvoiceProjectRecord, err error)

// Update_User_By_Id applies the given updates to the user with the given
// id and returns the updated row.
Update_User_By_Id(ctx context.Context,
	user_id User_Id_Field,
	update User_Update_Fields) (
	user *User, err error)

}
|
|
|
|
type TxMethods interface {
|
|
Methods
|
|
|
|
Rebind(s string) string
|
|
Commit() error
|
|
Rollback() error
|
|
}
|
|
|
|
type txMethods interface {
|
|
TxMethods
|
|
|
|
deleteAll(ctx context.Context) (int64, error)
|
|
makeErr(err error) error
|
|
}
|
|
|
|
type DBMethods interface {
|
|
Methods
|
|
|
|
Schema() string
|
|
Rebind(sql string) string
|
|
}
|
|
|
|
type dbMethods interface {
|
|
DBMethods
|
|
|
|
wrapTx(tx tagsql.Tx) txMethods
|
|
makeErr(err error) error
|
|
}
|
|
|
|
// openpgx opens a database handle for the given connection source using
// the stdlib-registered pgx driver.
func openpgx(source string) (*sql.DB, error) {
	db, err := sql.Open("pgx", source)
	return db, err
}
|
|
|
|
// openpgxcockroach opens a database handle for CockroachDB. It first
// tries a driver registered under the name "cockroach", so callers can
// install specialized behavior; when no such driver exists it falls back
// to the stock "pgx" driver.
func openpgxcockroach(source string) (*sql.DB, error) {
	if db, err := sql.Open("cockroach", source); err == nil {
		return db, nil
	}
	return sql.Open("pgx", source)
}
|