// Page-scrape residue (commit metadata), preserved as a comment so it is
// not mistaken for Go source:
// commit 3db903fe40
// * Move from Unique to Index * Remove Index * Make some more Indexes Unique and adjust migration * Fix Migration Statements * Fix Typo * Fix Migration of older Table * Exchange DROP statement * Remove "if not exists" * Revert Change in old Migration
// 10267 lines / 317 KiB / Go
// AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
|
|
// DO NOT EDIT.
|
|
|
|
package satellitedb
|
|
|
|
import (
	"bytes"
	"context"
	"database/sql"
	"errors"
	"fmt"
	"math/rand"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode"

	"github.com/lib/pq"
	"github.com/mattn/go-sqlite3"
)
|
|
|
|
// Prevent conditional imports from causing build failures
// (blank references keep the packages above "used" even when the
// generated code paths that need them are absent).
var _ = strconv.Itoa
var _ = strings.LastIndex
var _ = fmt.Sprint
var _ sync.Mutex
|
|
|
|
var (
	// WrapErr is applied (via wrapErr) to every *Error this package
	// builds; by default it returns the error unchanged. Override it to
	// translate errors at the package boundary.
	WrapErr = func(err *Error) error { return err }
	// Logger, when non-nil, receives printf-style statement and error
	// logs from this package.
	Logger func(format string, args ...interface{})

	// sentinel causes carried inside *Error.
	errTooManyRows       = errors.New("too many rows")
	errUnsupportedDriver = errors.New("unsupported driver")
	errEmptyUpdate       = errors.New("empty update")
)
|
|
|
|
func logError(format string, args ...interface{}) {
|
|
if Logger != nil {
|
|
Logger(format, args...)
|
|
}
|
|
}
|
|
|
|
// ErrorCode classifies the failures reported through *Error.
type ErrorCode int

const (
	// ErrorCode_Unknown is the zero value: the error was not classified.
	ErrorCode_Unknown ErrorCode = iota
	ErrorCode_UnsupportedDriver
	ErrorCode_NoRows
	ErrorCode_TxDone
	ErrorCode_TooManyRows
	ErrorCode_ConstraintViolation
	ErrorCode_EmptyUpdate
)
|
|
|
|
// Error is the error type produced by this package. Code classifies the
// failure; the remaining fields carry optional context (which driver,
// which constraint, which query suffix) depending on the code.
type Error struct {
	Err         error
	Code        ErrorCode
	Driver      string
	Constraint  string
	QuerySuffix string
}

// Error implements the error interface by delegating to the wrapped
// cause.
func (e *Error) Error() string {
	return e.Err.Error()
}
|
|
|
|
func wrapErr(e *Error) error {
|
|
if WrapErr == nil {
|
|
return e
|
|
}
|
|
return WrapErr(e)
|
|
}
|
|
|
|
func makeErr(err error) error {
|
|
if err == nil {
|
|
return nil
|
|
}
|
|
e := &Error{Err: err}
|
|
switch err {
|
|
case sql.ErrNoRows:
|
|
e.Code = ErrorCode_NoRows
|
|
case sql.ErrTxDone:
|
|
e.Code = ErrorCode_TxDone
|
|
}
|
|
return wrapErr(e)
|
|
}
|
|
|
|
func unsupportedDriver(driver string) error {
|
|
return wrapErr(&Error{
|
|
Err: errUnsupportedDriver,
|
|
Code: ErrorCode_UnsupportedDriver,
|
|
Driver: driver,
|
|
})
|
|
}
|
|
|
|
func emptyUpdate() error {
|
|
return wrapErr(&Error{
|
|
Err: errEmptyUpdate,
|
|
Code: ErrorCode_EmptyUpdate,
|
|
})
|
|
}
|
|
|
|
func tooManyRows(query_suffix string) error {
|
|
return wrapErr(&Error{
|
|
Err: errTooManyRows,
|
|
Code: ErrorCode_TooManyRows,
|
|
QuerySuffix: query_suffix,
|
|
})
|
|
}
|
|
|
|
func constraintViolation(err error, constraint string) error {
|
|
return wrapErr(&Error{
|
|
Err: err,
|
|
Code: ErrorCode_ConstraintViolation,
|
|
Constraint: constraint,
|
|
})
|
|
}
|
|
|
|
// driver is the subset of database/sql operations common to *sql.DB and
// *sql.Tx, letting the dialect implementations execute against either.
type driver interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}
|
|
|
|
var (
	// sentinel errors for value-conversion failures; the messages
	// describe the condition ("destination not a pointer", "lossy
	// conversion").
	notAPointer     = errors.New("destination not a pointer")
	lossyConversion = errors.New("lossy conversion")
)
|
|
|
|
// DB wraps *sql.DB together with the dialect-specific method set chosen
// at Open time.
type DB struct {
	*sql.DB
	dbMethods

	// Hooks holds replaceable environmental functions; Now is set to
	// time.Now by Open and may be overridden (e.g. by tests).
	Hooks struct {
		Now func() time.Time
	}
}
|
|
|
|
// Open connects to the database named by driver ("postgres" or
// "sqlite3") and source, verifies the connection with a ping, and
// returns a *DB bound to the matching dialect implementation.
func Open(driver, source string) (db *DB, err error) {
	var sql_db *sql.DB
	switch driver {
	case "postgres":
		sql_db, err = openpostgres(source)
	case "sqlite3":
		sql_db, err = opensqlite3(source)
	default:
		return nil, unsupportedDriver(driver)
	}
	if err != nil {
		return nil, makeErr(err)
	}
	// Close the handle if anything below fails. The closure inspects the
	// NAMED result err, which the later `return nil, makeErr(...)`
	// statements assign before deferred functions run — so the inner
	// shadowed `err := sql_db.Ping()` still triggers this cleanup.
	defer func(sql_db *sql.DB) {
		if err != nil {
			sql_db.Close()
		}
	}(sql_db)

	if err := sql_db.Ping(); err != nil {
		return nil, makeErr(err)
	}

	db = &DB{
		DB: sql_db,
	}
	db.Hooks.Now = time.Now

	// Bind the dialect method set. The default branch is unreachable
	// given the validation in the first switch, but kept for safety.
	switch driver {
	case "postgres":
		db.dbMethods = newpostgres(db)
	case "sqlite3":
		db.dbMethods = newsqlite3(db)
	default:
		return nil, unsupportedDriver(driver)
	}

	return db, nil
}
|
|
|
|
func (obj *DB) Close() (err error) {
|
|
return obj.makeErr(obj.DB.Close())
|
|
}
|
|
|
|
func (obj *DB) Open(ctx context.Context) (*Tx, error) {
|
|
tx, err := obj.DB.Begin()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
return &Tx{
|
|
Tx: tx,
|
|
txMethods: obj.wrapTx(tx),
|
|
}, nil
|
|
}
|
|
|
|
func (obj *DB) NewRx() *Rx {
|
|
return &Rx{db: obj}
|
|
}
|
|
|
|
func DeleteAll(ctx context.Context, db *DB) (int64, error) {
|
|
tx, err := db.Open(ctx)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
defer func() {
|
|
if err == nil {
|
|
err = db.makeErr(tx.Commit())
|
|
return
|
|
}
|
|
|
|
if err_rollback := tx.Rollback(); err_rollback != nil {
|
|
logError("delete-all: rollback failed: %v", db.makeErr(err_rollback))
|
|
}
|
|
}()
|
|
return tx.deleteAll(ctx)
|
|
}
|
|
|
|
// Tx pairs a raw *sql.Tx with the dialect-specific methods bound to
// that transaction.
type Tx struct {
	Tx *sql.Tx
	txMethods
}
|
|
|
|
// dialectTx adapts a *sql.Tx to the Commit/Rollback pair required by
// the transaction method sets, wrapping errors with makeErr.
type dialectTx struct {
	tx *sql.Tx
}

// Commit commits the wrapped transaction.
func (tx *dialectTx) Commit() (err error) {
	return makeErr(tx.tx.Commit())
}

// Rollback aborts the wrapped transaction.
func (tx *dialectTx) Rollback() (err error) {
	return makeErr(tx.tx.Rollback())
}
|
|
|
|
// postgresImpl holds the postgres-dialect state shared by *postgresDB
// and *postgresTx; driver is the *sql.DB or *sql.Tx the generated
// queries execute against.
type postgresImpl struct {
	db      *DB
	dialect __sqlbundle_postgres
	driver  driver
}

// Rebind delegates placeholder rewriting to the postgres dialect.
func (obj *postgresImpl) Rebind(s string) string {
	return obj.dialect.Rebind(s)
}

// logStmt logs a statement and its arguments via postgresLogStmt.
func (obj *postgresImpl) logStmt(stmt string, args ...interface{}) {
	postgresLogStmt(stmt, args...)
}

// makeErr wraps err, upgrading recognized constraint errors to
// ErrorCode_ConstraintViolation with the constraint name attached.
func (obj *postgresImpl) makeErr(err error) error {
	constraint, ok := obj.isConstraintError(err)
	if ok {
		return constraintViolation(err, constraint)
	}
	return makeErr(err)
}
|
|
|
|
// postgresDB is the dialect method set installed on DB when the
// "postgres" driver is selected.
type postgresDB struct {
	db *DB
	*postgresImpl
}

// newpostgres builds the postgres dialect bound to db, executing
// directly against the root *sql.DB.
func newpostgres(db *DB) *postgresDB {
	return &postgresDB{
		db: db,
		postgresImpl: &postgresImpl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
// Schema returns the DDL for the full postgres schema managed by this
// package: every table, its constraints, and the supporting indexes.
// The SQL text is part of the generated contract and must not be
// edited by hand.
func (obj *postgresDB) Schema() string {
	return `CREATE TABLE accounting_raws (
id bigserial NOT NULL,
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
data_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_id, interval_start, action )
);
CREATE TABLE bucket_storage_rollups (
bucket_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
PRIMARY KEY ( bucket_id, interval_start )
);
CREATE TABLE bucket_usages (
id bytea NOT NULL,
bucket_id bytea NOT NULL,
rollup_end_time timestamp with time zone NOT NULL,
remote_stored_data bigint NOT NULL,
inline_stored_data bigint NOT NULL,
remote_segments integer NOT NULL,
inline_segments integer NOT NULL,
objects integer NOT NULL,
metadata_size bigint NOT NULL,
repair_egress bigint NOT NULL,
get_egress bigint NOT NULL,
audit_egress bigint NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE bwagreements (
serialnum text NOT NULL,
storage_node_id bytea NOT NULL,
uplink_id bytea NOT NULL,
action bigint NOT NULL,
total bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( serialnum )
);
CREATE TABLE certRecords (
publickey bytea NOT NULL,
id bytea NOT NULL,
update_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE injuredsegments (
id bigserial NOT NULL,
info bytea NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
audit_success_ratio double precision NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_ratio double precision NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
wallet text NOT NULL,
email text NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE overlay_cache_nodes (
node_id bytea NOT NULL,
node_type integer NOT NULL,
address text NOT NULL,
protocol integer NOT NULL,
operator_email text NOT NULL,
operator_wallet text NOT NULL,
free_bandwidth bigint NOT NULL,
free_disk bigint NOT NULL,
latency_90 bigint NOT NULL,
audit_success_ratio double precision NOT NULL,
audit_uptime_ratio double precision NOT NULL,
audit_count bigint NOT NULL,
audit_success_count bigint NOT NULL,
uptime_count bigint NOT NULL,
uptime_success_count bigint NOT NULL,
PRIMARY KEY ( node_id ),
UNIQUE ( node_id )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_storage_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp NOT NULL,
interval_seconds integer NOT NULL,
total bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start )
);
CREATE TABLE users (
id bytea NOT NULL,
first_name text NOT NULL,
last_name text NOT NULL,
email text NOT NULL,
password_hash bytea NOT NULL,
status integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key bytea NOT NULL,
name text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE INDEX bucket_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );`
}
|
|
|
|
// wrapTx returns the postgres dialect methods rebound to execute
// against tx instead of the root *sql.DB.
func (obj *postgresDB) wrapTx(tx *sql.Tx) txMethods {
	return &postgresTx{
		dialectTx: dialectTx{tx: tx},
		postgresImpl: &postgresImpl{
			db:     obj.db,
			driver: tx,
		},
	}
}
|
|
|
|
// postgresTx combines transaction commit/rollback (dialectTx) with the
// postgres dialect implementation.
type postgresTx struct {
	dialectTx
	*postgresImpl
}
|
|
|
|
func postgresLogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
// sqlite3Impl holds the sqlite3-dialect state shared by *sqlite3DB and
// *sqlite3Tx; driver is the *sql.DB or *sql.Tx the generated queries
// execute against.
type sqlite3Impl struct {
	db      *DB
	dialect __sqlbundle_sqlite3
	driver  driver
}

// Rebind delegates placeholder rewriting to the sqlite3 dialect.
func (obj *sqlite3Impl) Rebind(s string) string {
	return obj.dialect.Rebind(s)
}

// logStmt logs a statement and its arguments via sqlite3LogStmt.
func (obj *sqlite3Impl) logStmt(stmt string, args ...interface{}) {
	sqlite3LogStmt(stmt, args...)
}

// makeErr wraps err, upgrading recognized constraint errors to
// ErrorCode_ConstraintViolation with the constraint name attached.
func (obj *sqlite3Impl) makeErr(err error) error {
	constraint, ok := obj.isConstraintError(err)
	if ok {
		return constraintViolation(err, constraint)
	}
	return makeErr(err)
}
|
|
|
|
// sqlite3DB is the dialect method set installed on DB when the
// "sqlite3" driver is selected.
type sqlite3DB struct {
	db *DB
	*sqlite3Impl
}

// newsqlite3 builds the sqlite3 dialect bound to db, executing directly
// against the root *sql.DB.
func newsqlite3(db *DB) *sqlite3DB {
	return &sqlite3DB{
		db: db,
		sqlite3Impl: &sqlite3Impl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
// Schema returns the DDL for the full sqlite3 schema managed by this
// package, mirroring the postgres schema with sqlite3 column types.
// The SQL text is part of the generated contract and must not be
// edited by hand.
func (obj *sqlite3DB) Schema() string {
	return `CREATE TABLE accounting_raws (
id INTEGER NOT NULL,
node_id BLOB NOT NULL,
interval_end_time TIMESTAMP NOT NULL,
data_total REAL NOT NULL,
data_type INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_rollups (
id INTEGER NOT NULL,
node_id BLOB NOT NULL,
start_time TIMESTAMP NOT NULL,
put_total INTEGER NOT NULL,
get_total INTEGER NOT NULL,
get_audit_total INTEGER NOT NULL,
get_repair_total INTEGER NOT NULL,
put_repair_total INTEGER NOT NULL,
at_rest_total REAL NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_timestamps (
name TEXT NOT NULL,
value TIMESTAMP NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_id BLOB NOT NULL,
interval_start TIMESTAMP NOT NULL,
interval_seconds INTEGER NOT NULL,
action INTEGER NOT NULL,
inline INTEGER NOT NULL,
allocated INTEGER NOT NULL,
settled INTEGER NOT NULL,
PRIMARY KEY ( bucket_id, interval_start, action )
);
CREATE TABLE bucket_storage_rollups (
bucket_id BLOB NOT NULL,
interval_start TIMESTAMP NOT NULL,
interval_seconds INTEGER NOT NULL,
inline INTEGER NOT NULL,
remote INTEGER NOT NULL,
PRIMARY KEY ( bucket_id, interval_start )
);
CREATE TABLE bucket_usages (
id BLOB NOT NULL,
bucket_id BLOB NOT NULL,
rollup_end_time TIMESTAMP NOT NULL,
remote_stored_data INTEGER NOT NULL,
inline_stored_data INTEGER NOT NULL,
remote_segments INTEGER NOT NULL,
inline_segments INTEGER NOT NULL,
objects INTEGER NOT NULL,
metadata_size INTEGER NOT NULL,
repair_egress INTEGER NOT NULL,
get_egress INTEGER NOT NULL,
audit_egress INTEGER NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE bwagreements (
serialnum TEXT NOT NULL,
storage_node_id BLOB NOT NULL,
uplink_id BLOB NOT NULL,
action INTEGER NOT NULL,
total INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
expires_at TIMESTAMP NOT NULL,
PRIMARY KEY ( serialnum )
);
CREATE TABLE certRecords (
publickey BLOB NOT NULL,
id BLOB NOT NULL,
update_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE injuredsegments (
id INTEGER NOT NULL,
info BLOB NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE irreparabledbs (
segmentpath BLOB NOT NULL,
segmentdetail BLOB NOT NULL,
pieces_lost_count INTEGER NOT NULL,
seg_damaged_unix_sec INTEGER NOT NULL,
repair_attempt_count INTEGER NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id BLOB NOT NULL,
audit_success_count INTEGER NOT NULL,
total_audit_count INTEGER NOT NULL,
audit_success_ratio REAL NOT NULL,
uptime_success_count INTEGER NOT NULL,
total_uptime_count INTEGER NOT NULL,
uptime_ratio REAL NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
wallet TEXT NOT NULL,
email TEXT NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE overlay_cache_nodes (
node_id BLOB NOT NULL,
node_type INTEGER NOT NULL,
address TEXT NOT NULL,
protocol INTEGER NOT NULL,
operator_email TEXT NOT NULL,
operator_wallet TEXT NOT NULL,
free_bandwidth INTEGER NOT NULL,
free_disk INTEGER NOT NULL,
latency_90 INTEGER NOT NULL,
audit_success_ratio REAL NOT NULL,
audit_uptime_ratio REAL NOT NULL,
audit_count INTEGER NOT NULL,
audit_success_count INTEGER NOT NULL,
uptime_count INTEGER NOT NULL,
uptime_success_count INTEGER NOT NULL,
PRIMARY KEY ( node_id ),
UNIQUE ( node_id )
);
CREATE TABLE projects (
id BLOB NOT NULL,
name TEXT NOT NULL,
description TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE registration_tokens (
secret BLOB NOT NULL,
owner_id BLOB,
project_limit INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE serial_numbers (
id INTEGER NOT NULL,
serial_number BLOB NOT NULL,
bucket_id BLOB NOT NULL,
expires_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id BLOB NOT NULL,
interval_start TIMESTAMP NOT NULL,
interval_seconds INTEGER NOT NULL,
action INTEGER NOT NULL,
allocated INTEGER NOT NULL,
settled INTEGER NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_storage_rollups (
storagenode_id BLOB NOT NULL,
interval_start TIMESTAMP NOT NULL,
interval_seconds INTEGER NOT NULL,
total INTEGER NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start )
);
CREATE TABLE users (
id BLOB NOT NULL,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL,
password_hash BLOB NOT NULL,
status INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE api_keys (
id BLOB NOT NULL,
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key BLOB NOT NULL,
name TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE project_members (
member_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE used_serials (
serial_number_id INTEGER NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id BLOB NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE INDEX bucket_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_id, interval_start, interval_seconds );
CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );`
}
|
|
|
|
// wrapTx returns the sqlite3 dialect methods rebound to execute against
// tx instead of the root *sql.DB.
func (obj *sqlite3DB) wrapTx(tx *sql.Tx) txMethods {
	return &sqlite3Tx{
		dialectTx: dialectTx{tx: tx},
		sqlite3Impl: &sqlite3Impl{
			db:     obj.db,
			driver: tx,
		},
	}
}
|
|
|
|
// sqlite3Tx combines transaction commit/rollback (dialectTx) with the
// sqlite3 dialect implementation.
type sqlite3Tx struct {
	dialectTx
	*sqlite3Impl
}
|
|
|
|
func sqlite3LogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
type pretty []interface{}
|
|
|
|
func (p pretty) Format(f fmt.State, c rune) {
|
|
fmt.Fprint(f, "[")
|
|
nextval:
|
|
for i, val := range p {
|
|
if i > 0 {
|
|
fmt.Fprint(f, ", ")
|
|
}
|
|
rv := reflect.ValueOf(val)
|
|
if rv.Kind() == reflect.Ptr {
|
|
if rv.IsNil() {
|
|
fmt.Fprint(f, "NULL")
|
|
continue
|
|
}
|
|
val = rv.Elem().Interface()
|
|
}
|
|
switch v := val.(type) {
|
|
case string:
|
|
fmt.Fprintf(f, "%q", v)
|
|
case time.Time:
|
|
fmt.Fprintf(f, "%s", v.Format(time.RFC3339Nano))
|
|
case []byte:
|
|
for _, b := range v {
|
|
if !unicode.IsPrint(rune(b)) {
|
|
fmt.Fprintf(f, "%#x", v)
|
|
continue nextval
|
|
}
|
|
}
|
|
fmt.Fprintf(f, "%q", v)
|
|
default:
|
|
fmt.Fprintf(f, "%v", v)
|
|
}
|
|
}
|
|
fmt.Fprint(f, "]")
|
|
}
|
|
|
|
// AccountingRaw mirrors one row of the accounting_raws table.
type AccountingRaw struct {
	Id              int64
	NodeId          []byte
	IntervalEndTime time.Time
	DataTotal       float64
	DataType        int
	CreatedAt       time.Time
}

// _Table names the backing table for this model.
func (AccountingRaw) _Table() string { return "accounting_raws" }

// AccountingRaw_Update_Fields lists updatable columns; this model has none.
type AccountingRaw_Update_Fields struct {
}

// AccountingRaw_Id_Field carries an optional value for the "id" column.
type AccountingRaw_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRaw_Id wraps v as a set, non-null column value.
func AccountingRaw_Id(v int64) AccountingRaw_Id_Field {
	return AccountingRaw_Id_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRaw_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRaw_Id_Field) _Column() string { return "id" }

// AccountingRaw_NodeId_Field carries an optional value for "node_id".
type AccountingRaw_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// AccountingRaw_NodeId wraps v as a set, non-null column value.
func AccountingRaw_NodeId(v []byte) AccountingRaw_NodeId_Field {
	return AccountingRaw_NodeId_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRaw_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRaw_NodeId_Field) _Column() string { return "node_id" }

// AccountingRaw_IntervalEndTime_Field carries an optional value for
// "interval_end_time".
type AccountingRaw_IntervalEndTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// AccountingRaw_IntervalEndTime wraps v as a set, non-null column value.
func AccountingRaw_IntervalEndTime(v time.Time) AccountingRaw_IntervalEndTime_Field {
	return AccountingRaw_IntervalEndTime_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRaw_IntervalEndTime_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRaw_IntervalEndTime_Field) _Column() string { return "interval_end_time" }

// AccountingRaw_DataTotal_Field carries an optional value for "data_total".
type AccountingRaw_DataTotal_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// AccountingRaw_DataTotal wraps v as a set, non-null column value.
func AccountingRaw_DataTotal(v float64) AccountingRaw_DataTotal_Field {
	return AccountingRaw_DataTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRaw_DataTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRaw_DataTotal_Field) _Column() string { return "data_total" }

// AccountingRaw_DataType_Field carries an optional value for "data_type".
type AccountingRaw_DataType_Field struct {
	_set   bool
	_null  bool
	_value int
}

// AccountingRaw_DataType wraps v as a set, non-null column value.
func AccountingRaw_DataType(v int) AccountingRaw_DataType_Field {
	return AccountingRaw_DataType_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRaw_DataType_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRaw_DataType_Field) _Column() string { return "data_type" }

// AccountingRaw_CreatedAt_Field carries an optional value for "created_at".
type AccountingRaw_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// AccountingRaw_CreatedAt wraps v as a set, non-null column value.
func AccountingRaw_CreatedAt(v time.Time) AccountingRaw_CreatedAt_Field {
	return AccountingRaw_CreatedAt_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRaw_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRaw_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// AccountingRollup mirrors one row of the accounting_rollups table.
type AccountingRollup struct {
	Id             int64
	NodeId         []byte
	StartTime      time.Time
	PutTotal       int64
	GetTotal       int64
	GetAuditTotal  int64
	GetRepairTotal int64
	PutRepairTotal int64
	AtRestTotal    float64
}

// _Table names the backing table for this model.
func (AccountingRollup) _Table() string { return "accounting_rollups" }

// AccountingRollup_Update_Fields lists updatable columns; this model has none.
type AccountingRollup_Update_Fields struct {
}

// AccountingRollup_Id_Field carries an optional value for "id".
type AccountingRollup_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_Id wraps v as a set, non-null column value.
func AccountingRollup_Id(v int64) AccountingRollup_Id_Field {
	return AccountingRollup_Id_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_Id_Field) _Column() string { return "id" }

// AccountingRollup_NodeId_Field carries an optional value for "node_id".
type AccountingRollup_NodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// AccountingRollup_NodeId wraps v as a set, non-null column value.
func AccountingRollup_NodeId(v []byte) AccountingRollup_NodeId_Field {
	return AccountingRollup_NodeId_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_NodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_NodeId_Field) _Column() string { return "node_id" }

// AccountingRollup_StartTime_Field carries an optional value for "start_time".
type AccountingRollup_StartTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// AccountingRollup_StartTime wraps v as a set, non-null column value.
func AccountingRollup_StartTime(v time.Time) AccountingRollup_StartTime_Field {
	return AccountingRollup_StartTime_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_StartTime_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_StartTime_Field) _Column() string { return "start_time" }

// AccountingRollup_PutTotal_Field carries an optional value for "put_total".
type AccountingRollup_PutTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_PutTotal wraps v as a set, non-null column value.
func AccountingRollup_PutTotal(v int64) AccountingRollup_PutTotal_Field {
	return AccountingRollup_PutTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_PutTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_PutTotal_Field) _Column() string { return "put_total" }

// AccountingRollup_GetTotal_Field carries an optional value for "get_total".
type AccountingRollup_GetTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_GetTotal wraps v as a set, non-null column value.
func AccountingRollup_GetTotal(v int64) AccountingRollup_GetTotal_Field {
	return AccountingRollup_GetTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_GetTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetTotal_Field) _Column() string { return "get_total" }

// AccountingRollup_GetAuditTotal_Field carries an optional value for
// "get_audit_total".
type AccountingRollup_GetAuditTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_GetAuditTotal wraps v as a set, non-null column value.
func AccountingRollup_GetAuditTotal(v int64) AccountingRollup_GetAuditTotal_Field {
	return AccountingRollup_GetAuditTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_GetAuditTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetAuditTotal_Field) _Column() string { return "get_audit_total" }

// AccountingRollup_GetRepairTotal_Field carries an optional value for
// "get_repair_total".
type AccountingRollup_GetRepairTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_GetRepairTotal wraps v as a set, non-null column value.
func AccountingRollup_GetRepairTotal(v int64) AccountingRollup_GetRepairTotal_Field {
	return AccountingRollup_GetRepairTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_GetRepairTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_GetRepairTotal_Field) _Column() string { return "get_repair_total" }

// AccountingRollup_PutRepairTotal_Field carries an optional value for
// "put_repair_total".
type AccountingRollup_PutRepairTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// AccountingRollup_PutRepairTotal wraps v as a set, non-null column value.
func AccountingRollup_PutRepairTotal(v int64) AccountingRollup_PutRepairTotal_Field {
	return AccountingRollup_PutRepairTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_PutRepairTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_PutRepairTotal_Field) _Column() string { return "put_repair_total" }

// AccountingRollup_AtRestTotal_Field carries an optional value for
// "at_rest_total".
type AccountingRollup_AtRestTotal_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// AccountingRollup_AtRestTotal wraps v as a set, non-null column value.
func AccountingRollup_AtRestTotal(v float64) AccountingRollup_AtRestTotal_Field {
	return AccountingRollup_AtRestTotal_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingRollup_AtRestTotal_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingRollup_AtRestTotal_Field) _Column() string { return "at_rest_total" }
|
|
|
|
// AccountingTimestamps mirrors one row of the accounting_timestamps
// table: a named timestamp value.
type AccountingTimestamps struct {
	Name  string
	Value time.Time
}

// _Table names the backing table for this model.
func (AccountingTimestamps) _Table() string { return "accounting_timestamps" }

// AccountingTimestamps_Update_Fields lists the updatable columns: only
// "value".
type AccountingTimestamps_Update_Fields struct {
	Value AccountingTimestamps_Value_Field
}

// AccountingTimestamps_Name_Field carries an optional value for "name".
type AccountingTimestamps_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// AccountingTimestamps_Name wraps v as a set, non-null column value.
func AccountingTimestamps_Name(v string) AccountingTimestamps_Name_Field {
	return AccountingTimestamps_Name_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingTimestamps_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingTimestamps_Name_Field) _Column() string { return "name" }

// AccountingTimestamps_Value_Field carries an optional value for "value".
type AccountingTimestamps_Value_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// AccountingTimestamps_Value wraps v as a set, non-null column value.
func AccountingTimestamps_Value(v time.Time) AccountingTimestamps_Value_Field {
	return AccountingTimestamps_Value_Field{_value: v, _set: true}
}

// value returns the SQL argument for the field: nil unless set and non-null.
func (f AccountingTimestamps_Value_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (AccountingTimestamps_Value_Field) _Column() string { return "value" }
|
|
|
|
type BucketBandwidthRollup struct {
|
|
BucketId []byte
|
|
IntervalStart time.Time
|
|
IntervalSeconds uint
|
|
Action uint
|
|
Inline uint64
|
|
Allocated uint64
|
|
Settled uint64
|
|
}
|
|
|
|
func (BucketBandwidthRollup) _Table() string { return "bucket_bandwidth_rollups" }
|
|
|
|
type BucketBandwidthRollup_Update_Fields struct {
|
|
}
|
|
|
|
type BucketBandwidthRollup_BucketId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func BucketBandwidthRollup_BucketId(v []byte) BucketBandwidthRollup_BucketId_Field {
|
|
return BucketBandwidthRollup_BucketId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_BucketId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_BucketId_Field) _Column() string { return "bucket_id" }
|
|
|
|
type BucketBandwidthRollup_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func BucketBandwidthRollup_IntervalStart(v time.Time) BucketBandwidthRollup_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return BucketBandwidthRollup_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketBandwidthRollup_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketBandwidthRollup_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
// BucketBandwidthRollup_IntervalSeconds_Field carries an "interval_seconds" column value.
type BucketBandwidthRollup_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketBandwidthRollup_IntervalSeconds builds a set field holding v.
func BucketBandwidthRollup_IntervalSeconds(v uint) BucketBandwidthRollup_IntervalSeconds_Field {
	var f BucketBandwidthRollup_IntervalSeconds_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketBandwidthRollup_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketBandwidthRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }

// BucketBandwidthRollup_Action_Field carries an "action" column value.
type BucketBandwidthRollup_Action_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketBandwidthRollup_Action builds a set field holding v.
func BucketBandwidthRollup_Action(v uint) BucketBandwidthRollup_Action_Field {
	var f BucketBandwidthRollup_Action_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketBandwidthRollup_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketBandwidthRollup_Action_Field) _Column() string { return "action" }

// BucketBandwidthRollup_Inline_Field carries an "inline" column value.
type BucketBandwidthRollup_Inline_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollup_Inline builds a set field holding v.
func BucketBandwidthRollup_Inline(v uint64) BucketBandwidthRollup_Inline_Field {
	var f BucketBandwidthRollup_Inline_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketBandwidthRollup_Inline_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketBandwidthRollup_Inline_Field) _Column() string { return "inline" }

// BucketBandwidthRollup_Allocated_Field carries an "allocated" column value.
type BucketBandwidthRollup_Allocated_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollup_Allocated builds a set field holding v.
func BucketBandwidthRollup_Allocated(v uint64) BucketBandwidthRollup_Allocated_Field {
	var f BucketBandwidthRollup_Allocated_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketBandwidthRollup_Allocated_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketBandwidthRollup_Allocated_Field) _Column() string { return "allocated" }

// BucketBandwidthRollup_Settled_Field carries a "settled" column value.
type BucketBandwidthRollup_Settled_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketBandwidthRollup_Settled builds a set field holding v.
func BucketBandwidthRollup_Settled(v uint64) BucketBandwidthRollup_Settled_Field {
	var f BucketBandwidthRollup_Settled_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketBandwidthRollup_Settled_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketBandwidthRollup_Settled_Field) _Column() string { return "settled" }
// BucketStorageRollup mirrors one row of the bucket_storage_rollups table.
type BucketStorageRollup struct {
	BucketId        []byte
	IntervalStart   time.Time
	IntervalSeconds uint
	Inline          uint64
	Remote          uint64
}

// _Table reports the table name backing BucketStorageRollup.
func (BucketStorageRollup) _Table() string { return "bucket_storage_rollups" }

// BucketStorageRollup_Update_Fields lists updatable columns (none for this table).
type BucketStorageRollup_Update_Fields struct{}

// BucketStorageRollup_BucketId_Field carries a "bucket_id" column value.
type BucketStorageRollup_BucketId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketStorageRollup_BucketId builds a set field holding v.
func BucketStorageRollup_BucketId(v []byte) BucketStorageRollup_BucketId_Field {
	var f BucketStorageRollup_BucketId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketStorageRollup_BucketId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketStorageRollup_BucketId_Field) _Column() string { return "bucket_id" }
type BucketStorageRollup_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func BucketStorageRollup_IntervalStart(v time.Time) BucketStorageRollup_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return BucketStorageRollup_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f BucketStorageRollup_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (BucketStorageRollup_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
// BucketStorageRollup_IntervalSeconds_Field carries an "interval_seconds" column value.
type BucketStorageRollup_IntervalSeconds_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketStorageRollup_IntervalSeconds builds a set field holding v.
func BucketStorageRollup_IntervalSeconds(v uint) BucketStorageRollup_IntervalSeconds_Field {
	var f BucketStorageRollup_IntervalSeconds_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketStorageRollup_IntervalSeconds_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketStorageRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }

// BucketStorageRollup_Inline_Field carries an "inline" column value.
type BucketStorageRollup_Inline_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketStorageRollup_Inline builds a set field holding v.
func BucketStorageRollup_Inline(v uint64) BucketStorageRollup_Inline_Field {
	var f BucketStorageRollup_Inline_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketStorageRollup_Inline_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketStorageRollup_Inline_Field) _Column() string { return "inline" }

// BucketStorageRollup_Remote_Field carries a "remote" column value.
type BucketStorageRollup_Remote_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketStorageRollup_Remote builds a set field holding v.
func BucketStorageRollup_Remote(v uint64) BucketStorageRollup_Remote_Field {
	var f BucketStorageRollup_Remote_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketStorageRollup_Remote_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketStorageRollup_Remote_Field) _Column() string { return "remote" }
// BucketUsage mirrors one row of the bucket_usages table.
type BucketUsage struct {
	Id               []byte
	BucketId         []byte
	RollupEndTime    time.Time
	RemoteStoredData uint64
	InlineStoredData uint64
	RemoteSegments   uint
	InlineSegments   uint
	Objects          uint
	MetadataSize     uint64
	RepairEgress     uint64
	GetEgress        uint64
	AuditEgress      uint64
}

// _Table reports the table name backing BucketUsage.
func (BucketUsage) _Table() string { return "bucket_usages" }

// BucketUsage_Update_Fields lists updatable columns (none for this table).
type BucketUsage_Update_Fields struct{}

// BucketUsage_Id_Field carries an "id" column value.
type BucketUsage_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketUsage_Id builds a set field holding v.
func BucketUsage_Id(v []byte) BucketUsage_Id_Field {
	var f BucketUsage_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_Id_Field) _Column() string { return "id" }

// BucketUsage_BucketId_Field carries a "bucket_id" column value.
type BucketUsage_BucketId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// BucketUsage_BucketId builds a set field holding v.
func BucketUsage_BucketId(v []byte) BucketUsage_BucketId_Field {
	var f BucketUsage_BucketId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_BucketId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_BucketId_Field) _Column() string { return "bucket_id" }

// BucketUsage_RollupEndTime_Field carries a "rollup_end_time" column value.
type BucketUsage_RollupEndTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// BucketUsage_RollupEndTime builds a set field holding v.
func BucketUsage_RollupEndTime(v time.Time) BucketUsage_RollupEndTime_Field {
	var f BucketUsage_RollupEndTime_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_RollupEndTime_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_RollupEndTime_Field) _Column() string { return "rollup_end_time" }

// BucketUsage_RemoteStoredData_Field carries a "remote_stored_data" column value.
type BucketUsage_RemoteStoredData_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketUsage_RemoteStoredData builds a set field holding v.
func BucketUsage_RemoteStoredData(v uint64) BucketUsage_RemoteStoredData_Field {
	var f BucketUsage_RemoteStoredData_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_RemoteStoredData_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_RemoteStoredData_Field) _Column() string { return "remote_stored_data" }

// BucketUsage_InlineStoredData_Field carries an "inline_stored_data" column value.
type BucketUsage_InlineStoredData_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketUsage_InlineStoredData builds a set field holding v.
func BucketUsage_InlineStoredData(v uint64) BucketUsage_InlineStoredData_Field {
	var f BucketUsage_InlineStoredData_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_InlineStoredData_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_InlineStoredData_Field) _Column() string { return "inline_stored_data" }

// BucketUsage_RemoteSegments_Field carries a "remote_segments" column value.
type BucketUsage_RemoteSegments_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketUsage_RemoteSegments builds a set field holding v.
func BucketUsage_RemoteSegments(v uint) BucketUsage_RemoteSegments_Field {
	var f BucketUsage_RemoteSegments_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_RemoteSegments_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_RemoteSegments_Field) _Column() string { return "remote_segments" }

// BucketUsage_InlineSegments_Field carries an "inline_segments" column value.
type BucketUsage_InlineSegments_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketUsage_InlineSegments builds a set field holding v.
func BucketUsage_InlineSegments(v uint) BucketUsage_InlineSegments_Field {
	var f BucketUsage_InlineSegments_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_InlineSegments_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_InlineSegments_Field) _Column() string { return "inline_segments" }

// BucketUsage_Objects_Field carries an "objects" column value.
type BucketUsage_Objects_Field struct {
	_set   bool
	_null  bool
	_value uint
}

// BucketUsage_Objects builds a set field holding v.
func BucketUsage_Objects(v uint) BucketUsage_Objects_Field {
	var f BucketUsage_Objects_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_Objects_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_Objects_Field) _Column() string { return "objects" }

// BucketUsage_MetadataSize_Field carries a "metadata_size" column value.
type BucketUsage_MetadataSize_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketUsage_MetadataSize builds a set field holding v.
func BucketUsage_MetadataSize(v uint64) BucketUsage_MetadataSize_Field {
	var f BucketUsage_MetadataSize_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_MetadataSize_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_MetadataSize_Field) _Column() string { return "metadata_size" }

// BucketUsage_RepairEgress_Field carries a "repair_egress" column value.
type BucketUsage_RepairEgress_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketUsage_RepairEgress builds a set field holding v.
func BucketUsage_RepairEgress(v uint64) BucketUsage_RepairEgress_Field {
	var f BucketUsage_RepairEgress_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_RepairEgress_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_RepairEgress_Field) _Column() string { return "repair_egress" }

// BucketUsage_GetEgress_Field carries a "get_egress" column value.
type BucketUsage_GetEgress_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketUsage_GetEgress builds a set field holding v.
func BucketUsage_GetEgress(v uint64) BucketUsage_GetEgress_Field {
	var f BucketUsage_GetEgress_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_GetEgress_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_GetEgress_Field) _Column() string { return "get_egress" }

// BucketUsage_AuditEgress_Field carries an "audit_egress" column value.
type BucketUsage_AuditEgress_Field struct {
	_set   bool
	_null  bool
	_value uint64
}

// BucketUsage_AuditEgress builds a set field holding v.
func BucketUsage_AuditEgress(v uint64) BucketUsage_AuditEgress_Field {
	var f BucketUsage_AuditEgress_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f BucketUsage_AuditEgress_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (BucketUsage_AuditEgress_Field) _Column() string { return "audit_egress" }
// Bwagreement mirrors one row of the bwagreements table.
type Bwagreement struct {
	Serialnum     string
	StorageNodeId []byte
	UplinkId      []byte
	Action        int64
	Total         int64
	CreatedAt     time.Time
	ExpiresAt     time.Time
}

// _Table reports the table name backing Bwagreement.
func (Bwagreement) _Table() string { return "bwagreements" }

// Bwagreement_Update_Fields lists updatable columns (none for this table).
type Bwagreement_Update_Fields struct{}

// Bwagreement_Serialnum_Field carries a "serialnum" column value.
type Bwagreement_Serialnum_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Bwagreement_Serialnum builds a set field holding v.
func Bwagreement_Serialnum(v string) Bwagreement_Serialnum_Field {
	var f Bwagreement_Serialnum_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_Serialnum_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_Serialnum_Field) _Column() string { return "serialnum" }

// Bwagreement_StorageNodeId_Field carries a "storage_node_id" column value.
type Bwagreement_StorageNodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Bwagreement_StorageNodeId builds a set field holding v.
func Bwagreement_StorageNodeId(v []byte) Bwagreement_StorageNodeId_Field {
	var f Bwagreement_StorageNodeId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_StorageNodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_StorageNodeId_Field) _Column() string { return "storage_node_id" }

// Bwagreement_UplinkId_Field carries an "uplink_id" column value.
type Bwagreement_UplinkId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Bwagreement_UplinkId builds a set field holding v.
func Bwagreement_UplinkId(v []byte) Bwagreement_UplinkId_Field {
	var f Bwagreement_UplinkId_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_UplinkId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_UplinkId_Field) _Column() string { return "uplink_id" }

// Bwagreement_Action_Field carries an "action" column value.
type Bwagreement_Action_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Bwagreement_Action builds a set field holding v.
func Bwagreement_Action(v int64) Bwagreement_Action_Field {
	var f Bwagreement_Action_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_Action_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_Action_Field) _Column() string { return "action" }

// Bwagreement_Total_Field carries a "total" column value.
type Bwagreement_Total_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Bwagreement_Total builds a set field holding v.
func Bwagreement_Total(v int64) Bwagreement_Total_Field {
	var f Bwagreement_Total_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_Total_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_Total_Field) _Column() string { return "total" }

// Bwagreement_CreatedAt_Field carries a "created_at" column value.
type Bwagreement_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Bwagreement_CreatedAt builds a set field holding v.
func Bwagreement_CreatedAt(v time.Time) Bwagreement_CreatedAt_Field {
	var f Bwagreement_CreatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_CreatedAt_Field) _Column() string { return "created_at" }

// Bwagreement_ExpiresAt_Field carries an "expires_at" column value.
type Bwagreement_ExpiresAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Bwagreement_ExpiresAt builds a set field holding v.
func Bwagreement_ExpiresAt(v time.Time) Bwagreement_ExpiresAt_Field {
	var f Bwagreement_ExpiresAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Bwagreement_ExpiresAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Bwagreement_ExpiresAt_Field) _Column() string { return "expires_at" }
// CertRecord mirrors one row of the certRecords table.
type CertRecord struct {
	Publickey []byte
	Id        []byte
	UpdateAt  time.Time
}

// _Table reports the table name backing CertRecord.
func (CertRecord) _Table() string { return "certRecords" }

// CertRecord_Update_Fields lists updatable columns (none for this table).
type CertRecord_Update_Fields struct{}

// CertRecord_Publickey_Field carries a "publickey" column value.
type CertRecord_Publickey_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// CertRecord_Publickey builds a set field holding v.
func CertRecord_Publickey(v []byte) CertRecord_Publickey_Field {
	var f CertRecord_Publickey_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f CertRecord_Publickey_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (CertRecord_Publickey_Field) _Column() string { return "publickey" }

// CertRecord_Id_Field carries an "id" column value.
type CertRecord_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// CertRecord_Id builds a set field holding v.
func CertRecord_Id(v []byte) CertRecord_Id_Field {
	var f CertRecord_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f CertRecord_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (CertRecord_Id_Field) _Column() string { return "id" }

// CertRecord_UpdateAt_Field carries an "update_at" column value.
type CertRecord_UpdateAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// CertRecord_UpdateAt builds a set field holding v.
func CertRecord_UpdateAt(v time.Time) CertRecord_UpdateAt_Field {
	var f CertRecord_UpdateAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f CertRecord_UpdateAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (CertRecord_UpdateAt_Field) _Column() string { return "update_at" }
// Injuredsegment mirrors one row of the injuredsegments table.
type Injuredsegment struct {
	Id   int64
	Info []byte
}

// _Table reports the table name backing Injuredsegment.
func (Injuredsegment) _Table() string { return "injuredsegments" }

// Injuredsegment_Update_Fields lists updatable columns (none for this table).
type Injuredsegment_Update_Fields struct{}

// Injuredsegment_Id_Field carries an "id" column value.
type Injuredsegment_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Injuredsegment_Id builds a set field holding v.
func Injuredsegment_Id(v int64) Injuredsegment_Id_Field {
	var f Injuredsegment_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Injuredsegment_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Injuredsegment_Id_Field) _Column() string { return "id" }

// Injuredsegment_Info_Field carries an "info" column value.
type Injuredsegment_Info_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Injuredsegment_Info builds a set field holding v.
func Injuredsegment_Info(v []byte) Injuredsegment_Info_Field {
	var f Injuredsegment_Info_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Injuredsegment_Info_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Injuredsegment_Info_Field) _Column() string { return "info" }
// Irreparabledb mirrors one row of the irreparabledbs table.
type Irreparabledb struct {
	Segmentpath        []byte
	Segmentdetail      []byte
	PiecesLostCount    int64
	SegDamagedUnixSec  int64
	RepairAttemptCount int64
}

// _Table reports the table name backing Irreparabledb.
func (Irreparabledb) _Table() string { return "irreparabledbs" }

// Irreparabledb_Update_Fields lists the columns that may be updated.
type Irreparabledb_Update_Fields struct {
	Segmentdetail      Irreparabledb_Segmentdetail_Field
	PiecesLostCount    Irreparabledb_PiecesLostCount_Field
	SegDamagedUnixSec  Irreparabledb_SegDamagedUnixSec_Field
	RepairAttemptCount Irreparabledb_RepairAttemptCount_Field
}

// Irreparabledb_Segmentpath_Field carries a "segmentpath" column value.
type Irreparabledb_Segmentpath_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Irreparabledb_Segmentpath builds a set field holding v.
func Irreparabledb_Segmentpath(v []byte) Irreparabledb_Segmentpath_Field {
	var f Irreparabledb_Segmentpath_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Irreparabledb_Segmentpath_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Irreparabledb_Segmentpath_Field) _Column() string { return "segmentpath" }

// Irreparabledb_Segmentdetail_Field carries a "segmentdetail" column value.
type Irreparabledb_Segmentdetail_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Irreparabledb_Segmentdetail builds a set field holding v.
func Irreparabledb_Segmentdetail(v []byte) Irreparabledb_Segmentdetail_Field {
	var f Irreparabledb_Segmentdetail_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Irreparabledb_Segmentdetail_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Irreparabledb_Segmentdetail_Field) _Column() string { return "segmentdetail" }

// Irreparabledb_PiecesLostCount_Field carries a "pieces_lost_count" column value.
type Irreparabledb_PiecesLostCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_PiecesLostCount builds a set field holding v.
func Irreparabledb_PiecesLostCount(v int64) Irreparabledb_PiecesLostCount_Field {
	var f Irreparabledb_PiecesLostCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Irreparabledb_PiecesLostCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Irreparabledb_PiecesLostCount_Field) _Column() string { return "pieces_lost_count" }

// Irreparabledb_SegDamagedUnixSec_Field carries a "seg_damaged_unix_sec" column value.
type Irreparabledb_SegDamagedUnixSec_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_SegDamagedUnixSec builds a set field holding v.
func Irreparabledb_SegDamagedUnixSec(v int64) Irreparabledb_SegDamagedUnixSec_Field {
	var f Irreparabledb_SegDamagedUnixSec_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Irreparabledb_SegDamagedUnixSec_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Irreparabledb_SegDamagedUnixSec_Field) _Column() string { return "seg_damaged_unix_sec" }

// Irreparabledb_RepairAttemptCount_Field carries a "repair_attempt_count" column value.
type Irreparabledb_RepairAttemptCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Irreparabledb_RepairAttemptCount builds a set field holding v.
func Irreparabledb_RepairAttemptCount(v int64) Irreparabledb_RepairAttemptCount_Field {
	var f Irreparabledb_RepairAttemptCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Irreparabledb_RepairAttemptCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Irreparabledb_RepairAttemptCount_Field) _Column() string { return "repair_attempt_count" }
// Node mirrors one row of the nodes table.
type Node struct {
	Id                 []byte
	AuditSuccessCount  int64
	TotalAuditCount    int64
	AuditSuccessRatio  float64
	UptimeSuccessCount int64
	TotalUptimeCount   int64
	UptimeRatio        float64
	CreatedAt          time.Time
	UpdatedAt          time.Time
	Wallet             string
	Email              string
}

// _Table reports the table name backing Node.
func (Node) _Table() string { return "nodes" }

// Node_Update_Fields lists the columns that may be updated.
type Node_Update_Fields struct {
	AuditSuccessCount  Node_AuditSuccessCount_Field
	TotalAuditCount    Node_TotalAuditCount_Field
	AuditSuccessRatio  Node_AuditSuccessRatio_Field
	UptimeSuccessCount Node_UptimeSuccessCount_Field
	TotalUptimeCount   Node_TotalUptimeCount_Field
	UptimeRatio        Node_UptimeRatio_Field
	Wallet             Node_Wallet_Field
	Email              Node_Email_Field
}

// Node_Id_Field carries an "id" column value.
type Node_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Node_Id builds a set field holding v.
func Node_Id(v []byte) Node_Id_Field {
	var f Node_Id_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_Id_Field) _Column() string { return "id" }

// Node_AuditSuccessCount_Field carries an "audit_success_count" column value.
type Node_AuditSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_AuditSuccessCount builds a set field holding v.
func Node_AuditSuccessCount(v int64) Node_AuditSuccessCount_Field {
	var f Node_AuditSuccessCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_AuditSuccessCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_AuditSuccessCount_Field) _Column() string { return "audit_success_count" }

// Node_TotalAuditCount_Field carries a "total_audit_count" column value.
type Node_TotalAuditCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_TotalAuditCount builds a set field holding v.
func Node_TotalAuditCount(v int64) Node_TotalAuditCount_Field {
	var f Node_TotalAuditCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_TotalAuditCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_TotalAuditCount_Field) _Column() string { return "total_audit_count" }

// Node_AuditSuccessRatio_Field carries an "audit_success_ratio" column value.
type Node_AuditSuccessRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_AuditSuccessRatio builds a set field holding v.
func Node_AuditSuccessRatio(v float64) Node_AuditSuccessRatio_Field {
	var f Node_AuditSuccessRatio_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_AuditSuccessRatio_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_AuditSuccessRatio_Field) _Column() string { return "audit_success_ratio" }

// Node_UptimeSuccessCount_Field carries an "uptime_success_count" column value.
type Node_UptimeSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_UptimeSuccessCount builds a set field holding v.
func Node_UptimeSuccessCount(v int64) Node_UptimeSuccessCount_Field {
	var f Node_UptimeSuccessCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_UptimeSuccessCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_UptimeSuccessCount_Field) _Column() string { return "uptime_success_count" }

// Node_TotalUptimeCount_Field carries a "total_uptime_count" column value.
type Node_TotalUptimeCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// Node_TotalUptimeCount builds a set field holding v.
func Node_TotalUptimeCount(v int64) Node_TotalUptimeCount_Field {
	var f Node_TotalUptimeCount_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_TotalUptimeCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_TotalUptimeCount_Field) _Column() string { return "total_uptime_count" }

// Node_UptimeRatio_Field carries an "uptime_ratio" column value.
type Node_UptimeRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// Node_UptimeRatio builds a set field holding v.
func Node_UptimeRatio(v float64) Node_UptimeRatio_Field {
	var f Node_UptimeRatio_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_UptimeRatio_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_UptimeRatio_Field) _Column() string { return "uptime_ratio" }

// Node_CreatedAt_Field carries a "created_at" column value.
type Node_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_CreatedAt builds a set field holding v.
func Node_CreatedAt(v time.Time) Node_CreatedAt_Field {
	var f Node_CreatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_CreatedAt_Field) _Column() string { return "created_at" }

// Node_UpdatedAt_Field carries an "updated_at" column value.
type Node_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Node_UpdatedAt builds a set field holding v.
func Node_UpdatedAt(v time.Time) Node_UpdatedAt_Field {
	var f Node_UpdatedAt_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_UpdatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_UpdatedAt_Field) _Column() string { return "updated_at" }

// Node_Wallet_Field carries a "wallet" column value.
type Node_Wallet_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Wallet builds a set field holding v.
func Node_Wallet(v string) Node_Wallet_Field {
	var f Node_Wallet_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_Wallet_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_Wallet_Field) _Column() string { return "wallet" }

// Node_Email_Field carries an "email" column value.
type Node_Email_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Node_Email builds a set field holding v.
func Node_Email(v string) Node_Email_Field {
	var f Node_Email_Field
	f._set = true
	f._value = v
	return f
}

// value returns the stored value, or nil when the field is unset or null.
func (f Node_Email_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column this field maps to.
func (Node_Email_Field) _Column() string { return "email" }
type OverlayCacheNode struct {
|
|
NodeId []byte
|
|
NodeType int
|
|
Address string
|
|
Protocol int
|
|
OperatorEmail string
|
|
OperatorWallet string
|
|
FreeBandwidth int64
|
|
FreeDisk int64
|
|
Latency90 int64
|
|
AuditSuccessRatio float64
|
|
AuditUptimeRatio float64
|
|
AuditCount int64
|
|
AuditSuccessCount int64
|
|
UptimeCount int64
|
|
UptimeSuccessCount int64
|
|
}
|
|
|
|
func (OverlayCacheNode) _Table() string { return "overlay_cache_nodes" }
|
|
|
|
type OverlayCacheNode_Update_Fields struct {
|
|
Address OverlayCacheNode_Address_Field
|
|
Protocol OverlayCacheNode_Protocol_Field
|
|
OperatorEmail OverlayCacheNode_OperatorEmail_Field
|
|
OperatorWallet OverlayCacheNode_OperatorWallet_Field
|
|
FreeBandwidth OverlayCacheNode_FreeBandwidth_Field
|
|
FreeDisk OverlayCacheNode_FreeDisk_Field
|
|
Latency90 OverlayCacheNode_Latency90_Field
|
|
AuditSuccessRatio OverlayCacheNode_AuditSuccessRatio_Field
|
|
AuditUptimeRatio OverlayCacheNode_AuditUptimeRatio_Field
|
|
AuditCount OverlayCacheNode_AuditCount_Field
|
|
AuditSuccessCount OverlayCacheNode_AuditSuccessCount_Field
|
|
UptimeCount OverlayCacheNode_UptimeCount_Field
|
|
UptimeSuccessCount OverlayCacheNode_UptimeSuccessCount_Field
|
|
}
|
|
|
|
type OverlayCacheNode_NodeId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func OverlayCacheNode_NodeId(v []byte) OverlayCacheNode_NodeId_Field {
|
|
return OverlayCacheNode_NodeId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f OverlayCacheNode_NodeId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (OverlayCacheNode_NodeId_Field) _Column() string { return "node_id" }
|
|
|
|
type OverlayCacheNode_NodeType_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func OverlayCacheNode_NodeType(v int) OverlayCacheNode_NodeType_Field {
|
|
return OverlayCacheNode_NodeType_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f OverlayCacheNode_NodeType_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (OverlayCacheNode_NodeType_Field) _Column() string { return "node_type" }
|
|
|
|
type OverlayCacheNode_Address_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value string
|
|
}
|
|
|
|
func OverlayCacheNode_Address(v string) OverlayCacheNode_Address_Field {
|
|
return OverlayCacheNode_Address_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f OverlayCacheNode_Address_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (OverlayCacheNode_Address_Field) _Column() string { return "address" }
|
|
|
|
// OverlayCacheNode_Protocol_Field carries the "protocol" column value plus
// set/null bookkeeping for query construction.
type OverlayCacheNode_Protocol_Field struct {
	_set   bool
	_null  bool
	_value int
}

// OverlayCacheNode_Protocol wraps v as a set, non-null field value.
func OverlayCacheNode_Protocol(v int) OverlayCacheNode_Protocol_Field {
	return OverlayCacheNode_Protocol_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless the field is set and
// non-null.
func (f OverlayCacheNode_Protocol_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

// _Column reports the database column name backing this field.
func (OverlayCacheNode_Protocol_Field) _Column() string { return "protocol" }

// OverlayCacheNode_OperatorEmail_Field carries the "operator_email" column value.
type OverlayCacheNode_OperatorEmail_Field struct {
	_set   bool
	_null  bool
	_value string
}

// OverlayCacheNode_OperatorEmail wraps v as a set, non-null field value.
func OverlayCacheNode_OperatorEmail(v string) OverlayCacheNode_OperatorEmail_Field {
	return OverlayCacheNode_OperatorEmail_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_OperatorEmail_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_OperatorEmail_Field) _Column() string { return "operator_email" }

// OverlayCacheNode_OperatorWallet_Field carries the "operator_wallet" column value.
type OverlayCacheNode_OperatorWallet_Field struct {
	_set   bool
	_null  bool
	_value string
}

// OverlayCacheNode_OperatorWallet wraps v as a set, non-null field value.
func OverlayCacheNode_OperatorWallet(v string) OverlayCacheNode_OperatorWallet_Field {
	return OverlayCacheNode_OperatorWallet_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_OperatorWallet_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_OperatorWallet_Field) _Column() string { return "operator_wallet" }

// OverlayCacheNode_FreeBandwidth_Field carries the "free_bandwidth" column value.
type OverlayCacheNode_FreeBandwidth_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_FreeBandwidth wraps v as a set, non-null field value.
func OverlayCacheNode_FreeBandwidth(v int64) OverlayCacheNode_FreeBandwidth_Field {
	return OverlayCacheNode_FreeBandwidth_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_FreeBandwidth_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_FreeBandwidth_Field) _Column() string { return "free_bandwidth" }

// OverlayCacheNode_FreeDisk_Field carries the "free_disk" column value.
type OverlayCacheNode_FreeDisk_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_FreeDisk wraps v as a set, non-null field value.
func OverlayCacheNode_FreeDisk(v int64) OverlayCacheNode_FreeDisk_Field {
	return OverlayCacheNode_FreeDisk_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_FreeDisk_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_FreeDisk_Field) _Column() string { return "free_disk" }

// OverlayCacheNode_Latency90_Field carries the "latency_90" column value.
type OverlayCacheNode_Latency90_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_Latency90 wraps v as a set, non-null field value.
func OverlayCacheNode_Latency90(v int64) OverlayCacheNode_Latency90_Field {
	return OverlayCacheNode_Latency90_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_Latency90_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_Latency90_Field) _Column() string { return "latency_90" }

// OverlayCacheNode_AuditSuccessRatio_Field carries the "audit_success_ratio" column value.
type OverlayCacheNode_AuditSuccessRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// OverlayCacheNode_AuditSuccessRatio wraps v as a set, non-null field value.
func OverlayCacheNode_AuditSuccessRatio(v float64) OverlayCacheNode_AuditSuccessRatio_Field {
	return OverlayCacheNode_AuditSuccessRatio_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_AuditSuccessRatio_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_AuditSuccessRatio_Field) _Column() string { return "audit_success_ratio" }

// OverlayCacheNode_AuditUptimeRatio_Field carries the "audit_uptime_ratio" column value.
type OverlayCacheNode_AuditUptimeRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

// OverlayCacheNode_AuditUptimeRatio wraps v as a set, non-null field value.
func OverlayCacheNode_AuditUptimeRatio(v float64) OverlayCacheNode_AuditUptimeRatio_Field {
	return OverlayCacheNode_AuditUptimeRatio_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_AuditUptimeRatio_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_AuditUptimeRatio_Field) _Column() string { return "audit_uptime_ratio" }

// OverlayCacheNode_AuditCount_Field carries the "audit_count" column value.
type OverlayCacheNode_AuditCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_AuditCount wraps v as a set, non-null field value.
func OverlayCacheNode_AuditCount(v int64) OverlayCacheNode_AuditCount_Field {
	return OverlayCacheNode_AuditCount_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_AuditCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_AuditCount_Field) _Column() string { return "audit_count" }

// OverlayCacheNode_AuditSuccessCount_Field carries the "audit_success_count" column value.
type OverlayCacheNode_AuditSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_AuditSuccessCount wraps v as a set, non-null field value.
func OverlayCacheNode_AuditSuccessCount(v int64) OverlayCacheNode_AuditSuccessCount_Field {
	return OverlayCacheNode_AuditSuccessCount_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_AuditSuccessCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_AuditSuccessCount_Field) _Column() string { return "audit_success_count" }

// OverlayCacheNode_UptimeCount_Field carries the "uptime_count" column value.
type OverlayCacheNode_UptimeCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_UptimeCount wraps v as a set, non-null field value.
func OverlayCacheNode_UptimeCount(v int64) OverlayCacheNode_UptimeCount_Field {
	return OverlayCacheNode_UptimeCount_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_UptimeCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_UptimeCount_Field) _Column() string { return "uptime_count" }

// OverlayCacheNode_UptimeSuccessCount_Field carries the "uptime_success_count" column value.
type OverlayCacheNode_UptimeSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

// OverlayCacheNode_UptimeSuccessCount wraps v as a set, non-null field value.
func OverlayCacheNode_UptimeSuccessCount(v int64) OverlayCacheNode_UptimeSuccessCount_Field {
	return OverlayCacheNode_UptimeSuccessCount_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f OverlayCacheNode_UptimeSuccessCount_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_UptimeSuccessCount_Field) _Column() string { return "uptime_success_count" }
|
|
|
|
// Project models a row of the "projects" table.
type Project struct {
	Id          []byte
	Name        string
	Description string
	CreatedAt   time.Time
}

// _Table reports the table name rows of this type live in.
func (Project) _Table() string { return "projects" }

// Project_Update_Fields groups the field values that update operations may
// assign.
type Project_Update_Fields struct {
	Description Project_Description_Field
}

// Project_Id_Field carries the "id" column value with set/null bookkeeping.
type Project_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Project_Id wraps v as a set, non-null field value.
func Project_Id(v []byte) Project_Id_Field {
	return Project_Id_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f Project_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Project_Id_Field) _Column() string { return "id" }

// Project_Name_Field carries the "name" column value.
type Project_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Project_Name wraps v as a set, non-null field value.
func Project_Name(v string) Project_Name_Field {
	return Project_Name_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f Project_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Project_Name_Field) _Column() string { return "name" }

// Project_Description_Field carries the "description" column value.
type Project_Description_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Project_Description wraps v as a set, non-null field value.
func Project_Description(v string) Project_Description_Field {
	return Project_Description_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f Project_Description_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Project_Description_Field) _Column() string { return "description" }

// Project_CreatedAt_Field carries the "created_at" column value.
type Project_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Project_CreatedAt wraps v as a set, non-null field value.
func Project_CreatedAt(v time.Time) Project_CreatedAt_Field {
	return Project_CreatedAt_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f Project_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Project_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// RegistrationToken models a row of the "registration_tokens" table.
type RegistrationToken struct {
	Secret       []byte
	OwnerId      []byte
	ProjectLimit int
	CreatedAt    time.Time
}

// _Table reports the table name rows of this type live in.
func (RegistrationToken) _Table() string { return "registration_tokens" }

// RegistrationToken_Create_Fields groups the optional field values accepted
// on create.
type RegistrationToken_Create_Fields struct {
	OwnerId RegistrationToken_OwnerId_Field
}

// RegistrationToken_Update_Fields groups the field values that update
// operations may assign.
type RegistrationToken_Update_Fields struct {
	OwnerId RegistrationToken_OwnerId_Field
}

// RegistrationToken_Secret_Field carries the "secret" column value.
type RegistrationToken_Secret_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// RegistrationToken_Secret wraps v as a set, non-null field value.
func RegistrationToken_Secret(v []byte) RegistrationToken_Secret_Field {
	return RegistrationToken_Secret_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f RegistrationToken_Secret_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_Secret_Field) _Column() string { return "secret" }

// RegistrationToken_OwnerId_Field carries the nullable "owner_id" column value.
type RegistrationToken_OwnerId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// RegistrationToken_OwnerId wraps v as a set, non-null field value.
func RegistrationToken_OwnerId(v []byte) RegistrationToken_OwnerId_Field {
	return RegistrationToken_OwnerId_Field{_value: v, _set: true}
}

// RegistrationToken_OwnerId_Raw maps a nil slice to the null field value and
// any other slice to a set value.
func RegistrationToken_OwnerId_Raw(v []byte) RegistrationToken_OwnerId_Field {
	if v != nil {
		return RegistrationToken_OwnerId(v)
	}
	return RegistrationToken_OwnerId_Null()
}

// RegistrationToken_OwnerId_Null builds an explicitly-null field value.
func RegistrationToken_OwnerId_Null() RegistrationToken_OwnerId_Field {
	return RegistrationToken_OwnerId_Field{_set: true, _null: true}
}

// isnull reports whether the field would store SQL NULL.
func (f RegistrationToken_OwnerId_Field) isnull() bool {
	return !f._set || f._null || f._value == nil
}

// value returns the driver-level value: nil unless set and non-null.
func (f RegistrationToken_OwnerId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_OwnerId_Field) _Column() string { return "owner_id" }

// RegistrationToken_ProjectLimit_Field carries the "project_limit" column value.
type RegistrationToken_ProjectLimit_Field struct {
	_set   bool
	_null  bool
	_value int
}

// RegistrationToken_ProjectLimit wraps v as a set, non-null field value.
func RegistrationToken_ProjectLimit(v int) RegistrationToken_ProjectLimit_Field {
	return RegistrationToken_ProjectLimit_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f RegistrationToken_ProjectLimit_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_ProjectLimit_Field) _Column() string { return "project_limit" }

// RegistrationToken_CreatedAt_Field carries the "created_at" column value.
type RegistrationToken_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// RegistrationToken_CreatedAt wraps v as a set, non-null field value.
func RegistrationToken_CreatedAt(v time.Time) RegistrationToken_CreatedAt_Field {
	return RegistrationToken_CreatedAt_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f RegistrationToken_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (RegistrationToken_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
type SerialNumber struct {
|
|
Id int
|
|
SerialNumber []byte
|
|
BucketId []byte
|
|
ExpiresAt time.Time
|
|
}
|
|
|
|
func (SerialNumber) _Table() string { return "serial_numbers" }
|
|
|
|
type SerialNumber_Update_Fields struct {
|
|
}
|
|
|
|
type SerialNumber_Id_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value int
|
|
}
|
|
|
|
func SerialNumber_Id(v int) SerialNumber_Id_Field {
|
|
return SerialNumber_Id_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f SerialNumber_Id_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (SerialNumber_Id_Field) _Column() string { return "id" }
|
|
|
|
type SerialNumber_SerialNumber_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func SerialNumber_SerialNumber(v []byte) SerialNumber_SerialNumber_Field {
|
|
return SerialNumber_SerialNumber_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f SerialNumber_SerialNumber_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (SerialNumber_SerialNumber_Field) _Column() string { return "serial_number" }
|
|
|
|
type SerialNumber_BucketId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func SerialNumber_BucketId(v []byte) SerialNumber_BucketId_Field {
|
|
return SerialNumber_BucketId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f SerialNumber_BucketId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (SerialNumber_BucketId_Field) _Column() string { return "bucket_id" }
|
|
|
|
type SerialNumber_ExpiresAt_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func SerialNumber_ExpiresAt(v time.Time) SerialNumber_ExpiresAt_Field {
|
|
v = toUTC(v)
|
|
return SerialNumber_ExpiresAt_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f SerialNumber_ExpiresAt_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (SerialNumber_ExpiresAt_Field) _Column() string { return "expires_at" }
|
|
|
|
type StoragenodeBandwidthRollup struct {
|
|
StoragenodeId []byte
|
|
IntervalStart time.Time
|
|
IntervalSeconds uint
|
|
Action uint
|
|
Allocated uint64
|
|
Settled uint64
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup) _Table() string { return "storagenode_bandwidth_rollups" }
|
|
|
|
type StoragenodeBandwidthRollup_Update_Fields struct {
|
|
}
|
|
|
|
type StoragenodeBandwidthRollup_StoragenodeId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_StoragenodeId(v []byte) StoragenodeBandwidthRollup_StoragenodeId_Field {
|
|
return StoragenodeBandwidthRollup_StoragenodeId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_StoragenodeId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_StoragenodeId_Field) _Column() string { return "storagenode_id" }
|
|
|
|
type StoragenodeBandwidthRollup_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_IntervalStart(v time.Time) StoragenodeBandwidthRollup_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return StoragenodeBandwidthRollup_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
type StoragenodeBandwidthRollup_IntervalSeconds_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_IntervalSeconds(v uint) StoragenodeBandwidthRollup_IntervalSeconds_Field {
|
|
return StoragenodeBandwidthRollup_IntervalSeconds_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_IntervalSeconds_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }
|
|
|
|
type StoragenodeBandwidthRollup_Action_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_Action(v uint) StoragenodeBandwidthRollup_Action_Field {
|
|
return StoragenodeBandwidthRollup_Action_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_Action_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_Action_Field) _Column() string { return "action" }
|
|
|
|
type StoragenodeBandwidthRollup_Allocated_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_Allocated(v uint64) StoragenodeBandwidthRollup_Allocated_Field {
|
|
return StoragenodeBandwidthRollup_Allocated_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_Allocated_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_Allocated_Field) _Column() string { return "allocated" }
|
|
|
|
type StoragenodeBandwidthRollup_Settled_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func StoragenodeBandwidthRollup_Settled(v uint64) StoragenodeBandwidthRollup_Settled_Field {
|
|
return StoragenodeBandwidthRollup_Settled_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeBandwidthRollup_Settled_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeBandwidthRollup_Settled_Field) _Column() string { return "settled" }
|
|
|
|
type StoragenodeStorageRollup struct {
|
|
StoragenodeId []byte
|
|
IntervalStart time.Time
|
|
IntervalSeconds uint
|
|
Total uint64
|
|
}
|
|
|
|
func (StoragenodeStorageRollup) _Table() string { return "storagenode_storage_rollups" }
|
|
|
|
type StoragenodeStorageRollup_Update_Fields struct {
|
|
}
|
|
|
|
type StoragenodeStorageRollup_StoragenodeId_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value []byte
|
|
}
|
|
|
|
func StoragenodeStorageRollup_StoragenodeId(v []byte) StoragenodeStorageRollup_StoragenodeId_Field {
|
|
return StoragenodeStorageRollup_StoragenodeId_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeStorageRollup_StoragenodeId_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeStorageRollup_StoragenodeId_Field) _Column() string { return "storagenode_id" }
|
|
|
|
type StoragenodeStorageRollup_IntervalStart_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value time.Time
|
|
}
|
|
|
|
func StoragenodeStorageRollup_IntervalStart(v time.Time) StoragenodeStorageRollup_IntervalStart_Field {
|
|
v = toUTC(v)
|
|
return StoragenodeStorageRollup_IntervalStart_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeStorageRollup_IntervalStart_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeStorageRollup_IntervalStart_Field) _Column() string { return "interval_start" }
|
|
|
|
type StoragenodeStorageRollup_IntervalSeconds_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint
|
|
}
|
|
|
|
func StoragenodeStorageRollup_IntervalSeconds(v uint) StoragenodeStorageRollup_IntervalSeconds_Field {
|
|
return StoragenodeStorageRollup_IntervalSeconds_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeStorageRollup_IntervalSeconds_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeStorageRollup_IntervalSeconds_Field) _Column() string { return "interval_seconds" }
|
|
|
|
type StoragenodeStorageRollup_Total_Field struct {
|
|
_set bool
|
|
_null bool
|
|
_value uint64
|
|
}
|
|
|
|
func StoragenodeStorageRollup_Total(v uint64) StoragenodeStorageRollup_Total_Field {
|
|
return StoragenodeStorageRollup_Total_Field{_set: true, _value: v}
|
|
}
|
|
|
|
func (f StoragenodeStorageRollup_Total_Field) value() interface{} {
|
|
if !f._set || f._null {
|
|
return nil
|
|
}
|
|
return f._value
|
|
}
|
|
|
|
func (StoragenodeStorageRollup_Total_Field) _Column() string { return "total" }
|
|
|
|
// User models a row of the "users" table.
type User struct {
	Id           []byte
	FirstName    string
	LastName     string
	Email        string
	PasswordHash []byte
	Status       int
	CreatedAt    time.Time
}

// _Table reports the table name rows of this type live in.
func (User) _Table() string { return "users" }

// User_Update_Fields groups the field values that update operations may
// assign.
type User_Update_Fields struct {
	FirstName    User_FirstName_Field
	LastName     User_LastName_Field
	Email        User_Email_Field
	PasswordHash User_PasswordHash_Field
	Status       User_Status_Field
}

// User_Id_Field carries the "id" column value with set/null bookkeeping.
type User_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// User_Id wraps v as a set, non-null field value.
func User_Id(v []byte) User_Id_Field {
	return User_Id_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Id_Field) _Column() string { return "id" }

// User_FirstName_Field carries the "first_name" column value.
type User_FirstName_Field struct {
	_set   bool
	_null  bool
	_value string
}

// User_FirstName wraps v as a set, non-null field value.
func User_FirstName(v string) User_FirstName_Field {
	return User_FirstName_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_FirstName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_FirstName_Field) _Column() string { return "first_name" }

// User_LastName_Field carries the "last_name" column value.
type User_LastName_Field struct {
	_set   bool
	_null  bool
	_value string
}

// User_LastName wraps v as a set, non-null field value.
func User_LastName(v string) User_LastName_Field {
	return User_LastName_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_LastName_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_LastName_Field) _Column() string { return "last_name" }

// User_Email_Field carries the "email" column value.
type User_Email_Field struct {
	_set   bool
	_null  bool
	_value string
}

// User_Email wraps v as a set, non-null field value.
func User_Email(v string) User_Email_Field {
	return User_Email_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_Email_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Email_Field) _Column() string { return "email" }

// User_PasswordHash_Field carries the "password_hash" column value.
type User_PasswordHash_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// User_PasswordHash wraps v as a set, non-null field value.
func User_PasswordHash(v []byte) User_PasswordHash_Field {
	return User_PasswordHash_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_PasswordHash_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_PasswordHash_Field) _Column() string { return "password_hash" }

// User_Status_Field carries the "status" column value.
type User_Status_Field struct {
	_set   bool
	_null  bool
	_value int
}

// User_Status wraps v as a set, non-null field value.
func User_Status(v int) User_Status_Field {
	return User_Status_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_Status_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_Status_Field) _Column() string { return "status" }

// User_CreatedAt_Field carries the "created_at" column value.
type User_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// User_CreatedAt wraps v as a set, non-null field value.
func User_CreatedAt(v time.Time) User_CreatedAt_Field {
	return User_CreatedAt_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f User_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (User_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// ApiKey models a row of the "api_keys" table.
type ApiKey struct {
	Id        []byte
	ProjectId []byte
	Key       []byte
	Name      string
	CreatedAt time.Time
}

// _Table reports the table name rows of this type live in.
func (ApiKey) _Table() string { return "api_keys" }

// ApiKey_Update_Fields groups the field values that update operations may
// assign.
type ApiKey_Update_Fields struct {
	Name ApiKey_Name_Field
}

// ApiKey_Id_Field carries the "id" column value with set/null bookkeeping.
type ApiKey_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ApiKey_Id wraps v as a set, non-null field value.
func ApiKey_Id(v []byte) ApiKey_Id_Field {
	return ApiKey_Id_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ApiKey_Id_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Id_Field) _Column() string { return "id" }

// ApiKey_ProjectId_Field carries the "project_id" column value.
type ApiKey_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ApiKey_ProjectId wraps v as a set, non-null field value.
func ApiKey_ProjectId(v []byte) ApiKey_ProjectId_Field {
	return ApiKey_ProjectId_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ApiKey_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_ProjectId_Field) _Column() string { return "project_id" }

// ApiKey_Key_Field carries the "key" column value.
type ApiKey_Key_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ApiKey_Key wraps v as a set, non-null field value.
func ApiKey_Key(v []byte) ApiKey_Key_Field {
	return ApiKey_Key_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ApiKey_Key_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Key_Field) _Column() string { return "key" }

// ApiKey_Name_Field carries the "name" column value.
type ApiKey_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// ApiKey_Name wraps v as a set, non-null field value.
func ApiKey_Name(v string) ApiKey_Name_Field {
	return ApiKey_Name_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ApiKey_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_Name_Field) _Column() string { return "name" }

// ApiKey_CreatedAt_Field carries the "created_at" column value.
type ApiKey_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ApiKey_CreatedAt wraps v as a set, non-null field value.
func ApiKey_CreatedAt(v time.Time) ApiKey_CreatedAt_Field {
	return ApiKey_CreatedAt_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ApiKey_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ApiKey_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// ProjectMember models a row of the "project_members" join table.
type ProjectMember struct {
	MemberId  []byte
	ProjectId []byte
	CreatedAt time.Time
}

// _Table reports the table name rows of this type live in.
func (ProjectMember) _Table() string { return "project_members" }

// ProjectMember_Update_Fields is empty: no columns are updatable.
type ProjectMember_Update_Fields struct {
}

// ProjectMember_MemberId_Field carries the "member_id" column value.
type ProjectMember_MemberId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ProjectMember_MemberId wraps v as a set, non-null field value.
func ProjectMember_MemberId(v []byte) ProjectMember_MemberId_Field {
	return ProjectMember_MemberId_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ProjectMember_MemberId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ProjectMember_MemberId_Field) _Column() string { return "member_id" }

// ProjectMember_ProjectId_Field carries the "project_id" column value.
type ProjectMember_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// ProjectMember_ProjectId wraps v as a set, non-null field value.
func ProjectMember_ProjectId(v []byte) ProjectMember_ProjectId_Field {
	return ProjectMember_ProjectId_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ProjectMember_ProjectId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ProjectMember_ProjectId_Field) _Column() string { return "project_id" }

// ProjectMember_CreatedAt_Field carries the "created_at" column value.
type ProjectMember_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// ProjectMember_CreatedAt wraps v as a set, non-null field value.
func ProjectMember_CreatedAt(v time.Time) ProjectMember_CreatedAt_Field {
	return ProjectMember_CreatedAt_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f ProjectMember_CreatedAt_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (ProjectMember_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// UsedSerial models a row of the "used_serials" table.
type UsedSerial struct {
	SerialNumberId int
	StorageNodeId  []byte
}

// _Table reports the table name rows of this type live in.
func (UsedSerial) _Table() string { return "used_serials" }

// UsedSerial_Update_Fields is empty: no columns are updatable.
type UsedSerial_Update_Fields struct {
}

// UsedSerial_SerialNumberId_Field carries the "serial_number_id" column value.
type UsedSerial_SerialNumberId_Field struct {
	_set   bool
	_null  bool
	_value int
}

// UsedSerial_SerialNumberId wraps v as a set, non-null field value.
func UsedSerial_SerialNumberId(v int) UsedSerial_SerialNumberId_Field {
	return UsedSerial_SerialNumberId_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f UsedSerial_SerialNumberId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (UsedSerial_SerialNumberId_Field) _Column() string { return "serial_number_id" }

// UsedSerial_StorageNodeId_Field carries the "storage_node_id" column value.
type UsedSerial_StorageNodeId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// UsedSerial_StorageNodeId wraps v as a set, non-null field value.
func UsedSerial_StorageNodeId(v []byte) UsedSerial_StorageNodeId_Field {
	return UsedSerial_StorageNodeId_Field{_value: v, _set: true}
}

// value returns the driver-level value: nil unless set and non-null.
func (f UsedSerial_StorageNodeId_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (UsedSerial_StorageNodeId_Field) _Column() string { return "storage_node_id" }
|
|
|
|
// toUTC converts t to the UTC location without changing the instant.
func toUTC(t time.Time) time.Time {
	return t.UTC()
}

// toDate truncates t to whole-minute precision (not to a calendar day).
func toDate(t time.Time) time.Time {
	// keep up the minute portion so that translations between timezones will
	// continue to reflect properly.
	return t.Truncate(time.Minute)
}
|
|
|
|
//
|
|
// runtime support for building sql statements
|
|
//
|
|
|
|
// __sqlbundle_SQL is any renderable fragment of a SQL statement.
type __sqlbundle_SQL interface {
	Render() string

	private()
}

// __sqlbundle_Dialect rewrites a rendered statement into the placeholder
// syntax a specific database driver expects.
type __sqlbundle_Dialect interface {
	Rebind(sql string) string
}

// __sqlbundle_RenderOp tweaks how __sqlbundle_Render post-processes output.
type __sqlbundle_RenderOp int

const (
	// __sqlbundle_NoFlatten skips whitespace normalization.
	__sqlbundle_NoFlatten __sqlbundle_RenderOp = iota
	// __sqlbundle_NoTerminate skips the trailing semicolon.
	__sqlbundle_NoTerminate
)

// __sqlbundle_Render renders sql, by default flattening whitespace and
// appending a terminating semicolon, then rebinds placeholders for dialect.
func __sqlbundle_Render(dialect __sqlbundle_Dialect, sql __sqlbundle_SQL, ops ...__sqlbundle_RenderOp) string {
	out := sql.Render()

	flatten, terminate := true, true
	for _, op := range ops {
		if op == __sqlbundle_NoFlatten {
			flatten = false
		}
		if op == __sqlbundle_NoTerminate {
			terminate = false
		}
	}

	if flatten {
		out = __sqlbundle_flattenSQL(out)
	}
	if terminate {
		out += ";"
	}

	return dialect.Rebind(out)
}

// __sqlbundle_flattenSQL trims leading/trailing spaces, tabs, and newlines
// and collapses every internal run of them to a single space.
func __sqlbundle_flattenSQL(x string) string {
	x = strings.Trim(x, " \t\n")

	// Fast path: return the input untouched when nothing needs collapsing
	// (no tabs, no newlines, no doubled spaces).
	needsFix := false
	prevSpace := false
	for i := 0; i < len(x); i++ {
		c := x[i]
		if c == '\t' || c == '\n' || (c == ' ' && prevSpace) {
			needsFix = true
			break
		}
		prevSpace = c == ' '
	}
	if !needsFix {
		return x
	}

	var b strings.Builder
	b.Grow(len(x))
	for i := 0; i < len(x); {
		c := x[i]
		if c == ' ' || c == '\t' || c == '\n' {
			// Emit one space for the entire whitespace run.
			b.WriteByte(' ')
			for i < len(x) && (x[i] == ' ' || x[i] == '\t' || x[i] == '\n') {
				i++
			}
		} else {
			b.WriteByte(c)
			i++
		}
	}
	return b.String()
}

// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_postgres struct{}

// Rebind replaces each '?' placeholder with $1, $2, ... as postgres expects.
func (p __sqlbundle_postgres) Rebind(sql string) string {
	var out strings.Builder
	out.Grow(len(sql) + 10)

	param := 1
	for i := 0; i < len(sql); i++ {
		if sql[i] != '?' {
			out.WriteByte(sql[i])
			continue
		}
		out.WriteByte('$')
		out.WriteString(strconv.Itoa(param))
		param++
	}

	return out.String()
}

// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_sqlite3 struct{}

// Rebind is a no-op: sqlite3 accepts '?' placeholders natively.
func (s __sqlbundle_sqlite3) Rebind(sql string) string { return sql }

// __sqlbundle_Literal is a fixed SQL fragment.
type __sqlbundle_Literal string

func (__sqlbundle_Literal) private() {}

func (l __sqlbundle_Literal) Render() string { return string(l) }

// __sqlbundle_Literals renders its non-nil children joined by Join.
type __sqlbundle_Literals struct {
	Join string
	SQLs []__sqlbundle_SQL
}

func (__sqlbundle_Literals) private() {}

func (l __sqlbundle_Literals) Render() string {
	var out strings.Builder

	needJoin := false
	for _, sql := range l.SQLs {
		if sql == nil {
			continue
		}
		if needJoin {
			out.WriteString(l.Join)
		}
		needJoin = true
		out.WriteString(sql.Render())
	}

	return out.String()
}
|
|
|
|
// __sqlbundle_Condition renders an equality or inequality comparison, with
// special-case SQL when the right-hand side is null.
type __sqlbundle_Condition struct {
	// set at compile/embed time
	Name  string
	Left  string
	Equal bool
	Right string

	// set at runtime
	Null bool
}

func (*__sqlbundle_Condition) private() {}

// Render emits "left = right", "left != right", or the "is [not] null"
// forms when Null is set.
func (c *__sqlbundle_Condition) Render() string {
	// TODO(jeff): maybe check if we can use placeholders instead of the
	// literal null: this would make the templates easier.

	if c.Null {
		if c.Equal {
			return c.Left + " is null"
		}
		return c.Left + " is not null"
	}
	if c.Equal {
		return c.Left + " = " + c.Right
	}
	return c.Left + " != " + c.Right
}
|
|
|
|
// __sqlbundle_Hole is a placeholder inside an embedded statement whose SQL
// fragment is supplied at runtime.
type __sqlbundle_Hole struct {
	// set at compile/embed time
	Name string

	// set at runtime
	SQL __sqlbundle_SQL
}

func (*__sqlbundle_Hole) private() {}

// Render delegates to the runtime-supplied fragment; SQL must be set first.
func (h *__sqlbundle_Hole) Render() string { return h.SQL.Render() }
|
|
|
|
//
|
|
// end runtime support for building sql statements
|
|
//
|
|
|
|
// Id_Row holds a single "id" column scanned from a hand-written query.
type Id_Row struct {
	Id []byte
}

// Value_Row holds a single "value" column scanned from a hand-written query.
type Value_Row struct {
	Value time.Time
}
|
|
|
|
// Create_Irreparabledb inserts a row into irreparabledbs and returns the
// stored row as reported back by the database's RETURNING clause.
// ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_Irreparabledb(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
	irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
	irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
	irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
	irreparabledb *Irreparabledb, err error) {
	__segmentpath_val := irreparabledb_segmentpath.value()
	__segmentdetail_val := irreparabledb_segmentdetail.value()
	__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
	__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
	__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? ) RETURNING irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count")

	// Rewrite the ?-placeholders for the active dialect before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// Create_AccountingTimestamps inserts a (name, value) row into
// accounting_timestamps and returns the stored row from the RETURNING
// clause. ctx is accepted for interface symmetry but is not used by
// this generated code.
func (obj *postgresImpl) Create_AccountingTimestamps(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	accounting_timestamps_value AccountingTimestamps_Value_Field) (
	accounting_timestamps *AccountingTimestamps, err error) {
	__name_val := accounting_timestamps_name.value()
	__value_val := accounting_timestamps_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? ) RETURNING accounting_timestamps.name, accounting_timestamps.value")

	// Rewrite the ?-placeholders for the active dialect before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __name_val, __value_val)

	accounting_timestamps = &AccountingTimestamps{}
	err = obj.driver.QueryRow(__stmt, __name_val, __value_val).Scan(&accounting_timestamps.Name, &accounting_timestamps.Value)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_timestamps, nil

}
|
|
|
|
// Create_AccountingRollup inserts a row into accounting_rollups and
// returns the stored row, including the database-assigned id, via the
// RETURNING clause. ctx is accepted for interface symmetry but is not
// used by this generated code.
func (obj *postgresImpl) Create_AccountingRollup(ctx context.Context,
	accounting_rollup_node_id AccountingRollup_NodeId_Field,
	accounting_rollup_start_time AccountingRollup_StartTime_Field,
	accounting_rollup_put_total AccountingRollup_PutTotal_Field,
	accounting_rollup_get_total AccountingRollup_GetTotal_Field,
	accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
	accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
	accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
	accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
	accounting_rollup *AccountingRollup, err error) {
	__node_id_val := accounting_rollup_node_id.value()
	__start_time_val := accounting_rollup_start_time.value()
	__put_total_val := accounting_rollup_put_total.value()
	__get_total_val := accounting_rollup_get_total.value()
	__get_audit_total_val := accounting_rollup_get_audit_total.value()
	__get_repair_total_val := accounting_rollup_get_repair_total.value()
	__put_repair_total_val := accounting_rollup_put_repair_total.value()
	__at_rest_total_val := accounting_rollup_at_rest_total.value()

	// Note: id is not in the column list — it is generated by the database
	// and scanned back from RETURNING.
	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)

	accounting_rollup = &AccountingRollup{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_rollup, nil

}
|
|
|
|
// Create_AccountingRaw inserts a row into accounting_raws and returns the
// stored row, including the database-assigned id, via the RETURNING
// clause. ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_AccountingRaw(ctx context.Context,
	accounting_raw_node_id AccountingRaw_NodeId_Field,
	accounting_raw_interval_end_time AccountingRaw_IntervalEndTime_Field,
	accounting_raw_data_total AccountingRaw_DataTotal_Field,
	accounting_raw_data_type AccountingRaw_DataType_Field,
	accounting_raw_created_at AccountingRaw_CreatedAt_Field) (
	accounting_raw *AccountingRaw, err error) {
	__node_id_val := accounting_raw_node_id.value()
	__interval_end_time_val := accounting_raw_interval_end_time.value()
	__data_total_val := accounting_raw_data_total.value()
	__data_type_val := accounting_raw_data_type.value()
	// created_at is caller-supplied here, not stamped from the clock hook.
	__created_at_val := accounting_raw_created_at.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_raws ( node_id, interval_end_time, data_total, data_type, created_at ) VALUES ( ?, ?, ?, ?, ? ) RETURNING accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val)

	accounting_raw = &AccountingRaw{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val).Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_raw, nil

}
|
|
|
|
// Create_Node inserts a row into nodes, stamping created_at and
// updated_at from a single clock read, and returns the stored row via
// the RETURNING clause. ctx is accepted for interface symmetry but is
// not used by this generated code.
func (obj *postgresImpl) Create_Node(ctx context.Context,
	node_id Node_Id_Field,
	node_audit_success_count Node_AuditSuccessCount_Field,
	node_total_audit_count Node_TotalAuditCount_Field,
	node_audit_success_ratio Node_AuditSuccessRatio_Field,
	node_uptime_success_count Node_UptimeSuccessCount_Field,
	node_total_uptime_count Node_TotalUptimeCount_Field,
	node_uptime_ratio Node_UptimeRatio_Field,
	node_wallet Node_Wallet_Field,
	node_email Node_Email_Field) (
	node *Node, err error) {

	// Single clock read (via the test hook) so created_at == updated_at.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := node_id.value()
	__audit_success_count_val := node_audit_success_count.value()
	__total_audit_count_val := node_total_audit_count.value()
	__audit_success_ratio_val := node_audit_success_ratio.value()
	__uptime_success_count_val := node_uptime_success_count.value()
	__total_uptime_count_val := node_total_uptime_count.value()
	__uptime_ratio_val := node_uptime_ratio.value()
	__created_at_val := __now
	__updated_at_val := __now
	__wallet_val := node_wallet.value()
	__email_val := node_email.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at, wallet, email ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __wallet_val, __email_val)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __wallet_val, __email_val).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// Create_OverlayCacheNode inserts a row into overlay_cache_nodes and
// returns the stored row via the RETURNING clause. ctx is accepted for
// interface symmetry but is not used by this generated code.
func (obj *postgresImpl) Create_OverlayCacheNode(ctx context.Context,
	overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
	overlay_cache_node_node_type OverlayCacheNode_NodeType_Field,
	overlay_cache_node_address OverlayCacheNode_Address_Field,
	overlay_cache_node_protocol OverlayCacheNode_Protocol_Field,
	overlay_cache_node_operator_email OverlayCacheNode_OperatorEmail_Field,
	overlay_cache_node_operator_wallet OverlayCacheNode_OperatorWallet_Field,
	overlay_cache_node_free_bandwidth OverlayCacheNode_FreeBandwidth_Field,
	overlay_cache_node_free_disk OverlayCacheNode_FreeDisk_Field,
	overlay_cache_node_latency_90 OverlayCacheNode_Latency90_Field,
	overlay_cache_node_audit_success_ratio OverlayCacheNode_AuditSuccessRatio_Field,
	overlay_cache_node_audit_uptime_ratio OverlayCacheNode_AuditUptimeRatio_Field,
	overlay_cache_node_audit_count OverlayCacheNode_AuditCount_Field,
	overlay_cache_node_audit_success_count OverlayCacheNode_AuditSuccessCount_Field,
	overlay_cache_node_uptime_count OverlayCacheNode_UptimeCount_Field,
	overlay_cache_node_uptime_success_count OverlayCacheNode_UptimeSuccessCount_Field) (
	overlay_cache_node *OverlayCacheNode, err error) {
	__node_id_val := overlay_cache_node_node_id.value()
	__node_type_val := overlay_cache_node_node_type.value()
	__address_val := overlay_cache_node_address.value()
	__protocol_val := overlay_cache_node_protocol.value()
	__operator_email_val := overlay_cache_node_operator_email.value()
	__operator_wallet_val := overlay_cache_node_operator_wallet.value()
	__free_bandwidth_val := overlay_cache_node_free_bandwidth.value()
	__free_disk_val := overlay_cache_node_free_disk.value()
	__latency_90_val := overlay_cache_node_latency_90.value()
	__audit_success_ratio_val := overlay_cache_node_audit_success_ratio.value()
	__audit_uptime_ratio_val := overlay_cache_node_audit_uptime_ratio.value()
	__audit_count_val := overlay_cache_node_audit_count.value()
	__audit_success_count_val := overlay_cache_node_audit_success_count.value()
	__uptime_count_val := overlay_cache_node_uptime_count.value()
	__uptime_success_count_val := overlay_cache_node_uptime_success_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO overlay_cache_nodes ( node_id, node_type, address, protocol, operator_email, operator_wallet, free_bandwidth, free_disk, latency_90, audit_success_ratio, audit_uptime_ratio, audit_count, audit_success_count, uptime_count, uptime_success_count ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count")

	// Rewrite the ?-placeholders for the active dialect before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __node_type_val, __address_val, __protocol_val, __operator_email_val, __operator_wallet_val, __free_bandwidth_val, __free_disk_val, __latency_90_val, __audit_success_ratio_val, __audit_uptime_ratio_val, __audit_count_val, __audit_success_count_val, __uptime_count_val, __uptime_success_count_val)

	overlay_cache_node = &OverlayCacheNode{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __node_type_val, __address_val, __protocol_val, __operator_email_val, __operator_wallet_val, __free_bandwidth_val, __free_disk_val, __latency_90_val, __audit_success_ratio_val, __audit_uptime_ratio_val, __audit_count_val, __audit_success_count_val, __uptime_count_val, __uptime_success_count_val).Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return overlay_cache_node, nil

}
|
|
|
|
// Create_Injuredsegment inserts a row into injuredsegments and returns
// the stored row, including the database-assigned id, via the RETURNING
// clause. ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_Injuredsegment(ctx context.Context,
	injuredsegment_info Injuredsegment_Info_Field) (
	injuredsegment *Injuredsegment, err error) {
	__info_val := injuredsegment_info.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO injuredsegments ( info ) VALUES ( ? ) RETURNING injuredsegments.id, injuredsegments.info")

	// Rewrite the ?-placeholder for the active dialect before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __info_val)

	injuredsegment = &Injuredsegment{}
	err = obj.driver.QueryRow(__stmt, __info_val).Scan(&injuredsegment.Id, &injuredsegment.Info)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return injuredsegment, nil

}
|
|
|
|
// Create_User inserts a row into users with status fixed to 0 and
// created_at stamped from the clock hook, and returns the stored row via
// the RETURNING clause. ctx is accepted for interface symmetry but is
// not used by this generated code.
func (obj *postgresImpl) Create_User(ctx context.Context,
	user_id User_Id_Field,
	user_first_name User_FirstName_Field,
	user_last_name User_LastName_Field,
	user_email User_Email_Field,
	user_password_hash User_PasswordHash_Field) (
	user *User, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := user_id.value()
	__first_name_val := user_first_name.value()
	__last_name_val := user_last_name.value()
	__email_val := user_email.value()
	__password_hash_val := user_password_hash.value()
	// New users always start with status 0 (not caller-settable here).
	__status_val := int(0)
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, first_name, last_name, email, password_hash, status, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __first_name_val, __last_name_val, __email_val, __password_hash_val, __status_val, __created_at_val)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, __id_val, __first_name_val, __last_name_val, __email_val, __password_hash_val, __status_val, __created_at_val).Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// Create_Project inserts a row into projects with created_at stamped from
// the clock hook, and returns the stored row via the RETURNING clause.
// ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_Project(ctx context.Context,
	project_id Project_Id_Field,
	project_name Project_Name_Field,
	project_description Project_Description_Field) (
	project *Project, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := project_id.value()
	__name_val := project_name.value()
	__description_val := project_description.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING projects.id, projects.name, projects.description, projects.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __created_at_val)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, __id_val, __name_val, __description_val, __created_at_val).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// Create_ProjectMember inserts a (member_id, project_id) association row
// into project_members with created_at stamped from the clock hook, and
// returns the stored row via the RETURNING clause. ctx is accepted for
// interface symmetry but is not used by this generated code.
func (obj *postgresImpl) Create_ProjectMember(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	project_member *ProjectMember, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__member_id_val := project_member_member_id.value()
	__project_id_val := project_member_project_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? ) RETURNING project_members.member_id, project_members.project_id, project_members.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __member_id_val, __project_id_val, __created_at_val)

	project_member = &ProjectMember{}
	err = obj.driver.QueryRow(__stmt, __member_id_val, __project_id_val, __created_at_val).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project_member, nil

}
|
|
|
|
// Create_ApiKey inserts a row into api_keys with created_at stamped from
// the clock hook, and returns the stored row via the RETURNING clause.
// ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_ApiKey(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	api_key_project_id ApiKey_ProjectId_Field,
	api_key_key ApiKey_Key_Field,
	api_key_name ApiKey_Name_Field) (
	api_key *ApiKey, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := api_key_id.value()
	__project_id_val := api_key_project_id.value()
	__key_val := api_key_key.value()
	__name_val := api_key_name.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, key, name, created_at ) VALUES ( ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __project_id_val, __key_val, __name_val, __created_at_val)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __id_val, __project_id_val, __key_val, __name_val, __created_at_val).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// Create_BucketUsage inserts a rollup row into bucket_usages and returns
// the stored row via the RETURNING clause. ctx is accepted for interface
// symmetry but is not used by this generated code.
func (obj *postgresImpl) Create_BucketUsage(ctx context.Context,
	bucket_usage_id BucketUsage_Id_Field,
	bucket_usage_bucket_id BucketUsage_BucketId_Field,
	bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
	bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
	bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
	bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
	bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
	bucket_usage_objects BucketUsage_Objects_Field,
	bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
	bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
	bucket_usage_get_egress BucketUsage_GetEgress_Field,
	bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
	bucket_usage *BucketUsage, err error) {
	__id_val := bucket_usage_id.value()
	__bucket_id_val := bucket_usage_bucket_id.value()
	__rollup_end_time_val := bucket_usage_rollup_end_time.value()
	__remote_stored_data_val := bucket_usage_remote_stored_data.value()
	__inline_stored_data_val := bucket_usage_inline_stored_data.value()
	__remote_segments_val := bucket_usage_remote_segments.value()
	__inline_segments_val := bucket_usage_inline_segments.value()
	__objects_val := bucket_usage_objects.value()
	__metadata_size_val := bucket_usage_metadata_size.value()
	__repair_egress_val := bucket_usage_repair_egress.value()
	__get_egress_val := bucket_usage_get_egress.value()
	__audit_egress_val := bucket_usage_audit_egress.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_usages ( id, bucket_id, rollup_end_time, remote_stored_data, inline_stored_data, remote_segments, inline_segments, objects, metadata_size, repair_egress, get_egress, audit_egress ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress")

	// Rewrite the ?-placeholders for the active dialect before executing.
	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val)

	bucket_usage = &BucketUsage{}
	err = obj.driver.QueryRow(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_usage, nil

}
|
|
|
|
// Create_SerialNumber inserts a row into serial_numbers and returns the
// stored row, including the database-assigned id, via the RETURNING
// clause. ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_SerialNumber(ctx context.Context,
	serial_number_serial_number SerialNumber_SerialNumber_Field,
	serial_number_bucket_id SerialNumber_BucketId_Field,
	serial_number_expires_at SerialNumber_ExpiresAt_Field) (
	serial_number *SerialNumber, err error) {
	__serial_number_val := serial_number_serial_number.value()
	__bucket_id_val := serial_number_bucket_id.value()
	__expires_at_val := serial_number_expires_at.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO serial_numbers ( serial_number, bucket_id, expires_at ) VALUES ( ?, ?, ? ) RETURNING serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)

	serial_number = &SerialNumber{}
	err = obj.driver.QueryRow(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val).Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return serial_number, nil

}
|
|
|
|
// Create_UsedSerial inserts a (serial_number_id, storage_node_id) row
// into used_serials and returns the stored row via the RETURNING clause.
// ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_UsedSerial(ctx context.Context,
	used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
	used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
	used_serial *UsedSerial, err error) {
	__serial_number_id_val := used_serial_serial_number_id.value()
	__storage_node_id_val := used_serial_storage_node_id.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO used_serials ( serial_number_id, storage_node_id ) VALUES ( ?, ? ) RETURNING used_serials.serial_number_id, used_serials.storage_node_id")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __serial_number_id_val, __storage_node_id_val)

	used_serial = &UsedSerial{}
	err = obj.driver.QueryRow(__stmt, __serial_number_id_val, __storage_node_id_val).Scan(&used_serial.SerialNumberId, &used_serial.StorageNodeId)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return used_serial, nil

}
|
|
|
|
// Create_CertRecord inserts a row into certRecords with update_at stamped
// from the clock hook, and returns the stored row via the RETURNING
// clause. ctx is accepted for interface symmetry but is not used by this
// generated code.
func (obj *postgresImpl) Create_CertRecord(ctx context.Context,
	certRecord_publickey CertRecord_Publickey_Field,
	certRecord_id CertRecord_Id_Field) (
	certRecord *CertRecord, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__publickey_val := certRecord_publickey.value()
	__id_val := certRecord_id.value()
	__update_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO certRecords ( publickey, id, update_at ) VALUES ( ?, ?, ? ) RETURNING certRecords.publickey, certRecords.id, certRecords.update_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __publickey_val, __id_val, __update_at_val)

	certRecord = &CertRecord{}
	err = obj.driver.QueryRow(__stmt, __publickey_val, __id_val, __update_at_val).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return certRecord, nil

}
|
|
|
|
// Create_RegistrationToken inserts a row into registration_tokens with
// created_at stamped from the clock hook, and returns the stored row via
// the RETURNING clause. The owner_id comes from the optional create
// fields struct and so may be unset (stored as NULL). ctx is accepted
// for interface symmetry but is not used by this generated code.
func (obj *postgresImpl) Create_RegistrationToken(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	registration_token_project_limit RegistrationToken_ProjectLimit_Field,
	optional RegistrationToken_Create_Fields) (
	registration_token *RegistrationToken, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__secret_val := registration_token_secret.value()
	__owner_id_val := optional.OwnerId.value()
	__project_limit_val := registration_token_project_limit.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Get_Irreparabledb_By_Segmentpath fetches the single irreparabledbs row
// keyed by segmentpath. A missing row surfaces as a wrapped
// sql.ErrNoRows via obj.makeErr. ctx is accepted for interface symmetry
// but is not used by this generated code.
func (obj *postgresImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
func (obj *postgresImpl) Limited_Irreparabledb_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
irreparabledb := &Irreparabledb{}
|
|
err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, irreparabledb)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
// Find_AccountingTimestamps_Value_By_Name returns the value column of the
// accounting_timestamps row with the given name. Unlike the Get_ methods,
// a missing row is not an error: it returns (nil, nil). ctx is accepted
// for interface symmetry but is not used by this generated code.
func (obj *postgresImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field) (
	row *Value_Row, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")

	var __values []interface{}
	__values = append(__values, accounting_timestamps_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Value_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value)
	// "Find" semantics: no match is reported as a nil row, not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_AccountingRollup_By_Id fetches the single accounting_rollups row
// keyed by id. A missing row surfaces as a wrapped sql.ErrNoRows via
// obj.makeErr. ctx is accepted for interface symmetry but is not used by
// this generated code.
func (obj *postgresImpl) Get_AccountingRollup_By_Id(ctx context.Context,
	accounting_rollup_id AccountingRollup_Id_Field) (
	accounting_rollup *AccountingRollup, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?")

	var __values []interface{}
	__values = append(__values, accounting_rollup_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	accounting_rollup = &AccountingRollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_rollup, nil

}
|
|
|
|
// All_AccountingRollup_By_StartTime_GreaterOrEqual returns every
// accounting_rollups row whose start_time is >= the given value. An empty
// result yields a nil slice and nil error. ctx is accepted for interface
// symmetry but is not used by this generated code.
func (obj *postgresImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
	accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
	rows []*AccountingRollup, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")

	var __values []interface{}
	__values = append(__values, accounting_rollup_start_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		accounting_rollup := &AccountingRollup{}
		err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, accounting_rollup)
	}
	// Surface any error that occurred while iterating the result set.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_AccountingRaw_By_Id fetches the single accounting_raws row matching the
// given primary key. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_AccountingRaw_By_Id(ctx context.Context,
	accounting_raw_id AccountingRaw_Id_Field) (
	accounting_raw *AccountingRaw, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws WHERE accounting_raws.id = ?")

	var __values []interface{}
	__values = append(__values, accounting_raw_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	accounting_raw = &AccountingRaw{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_raw, nil

}
|
|
|
|
// All_AccountingRaw returns every row of the accounting_raws table,
// unfiltered and unordered.
func (obj *postgresImpl) All_AccountingRaw(ctx context.Context) (
	rows []*AccountingRaw, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws")

	var __values []interface{}
	// No WHERE clause: the generator still emits this (no-op) append.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		accounting_raw := &AccountingRaw{}
		err = __rows.Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, accounting_raw)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual returns all
// accounting_raws rows whose interval_end_time is at or after the given
// timestamp.
func (obj *postgresImpl) All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
	accounting_raw_interval_end_time_greater_or_equal AccountingRaw_IntervalEndTime_Field) (
	rows []*AccountingRaw, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws WHERE accounting_raws.interval_end_time >= ?")

	var __values []interface{}
	__values = append(__values, accounting_raw_interval_end_time_greater_or_equal.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		accounting_raw := &AccountingRaw{}
		err = __rows.Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, accounting_raw)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Node_By_Id fetches the single nodes row matching the given id.
// Unlike Find_Node_By_Id, a missing row is an error (wrapped sql.ErrNoRows).
func (obj *postgresImpl) Get_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// Find_Node_By_Id fetches the nodes row matching the given id.
// Per the dbx "Find" contract it returns (nil, nil) — not an error — when
// no row matches; callers must nil-check the result.
func (obj *postgresImpl) Find_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
	// "no rows" is not an error for Find_*: report absence as (nil, nil).
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// All_Node_Id returns the id column of every nodes row, each wrapped in an
// Id_Row projection struct.
func (obj *postgresImpl) All_Node_Id(ctx context.Context) (
	rows []*Id_Row, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes")

	var __values []interface{}
	// No WHERE clause: the generator still emits this (no-op) append.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		row := &Id_Row{}
		err = __rows.Scan(&row.Id)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, row)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_OverlayCacheNode_By_NodeId fetches the single overlay_cache_nodes row
// whose node_id matches. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_OverlayCacheNode_By_NodeId(ctx context.Context,
	overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
	overlay_cache_node *OverlayCacheNode, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id = ?")

	var __values []interface{}
	__values = append(__values, overlay_cache_node_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	overlay_cache_node = &OverlayCacheNode{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return overlay_cache_node, nil

}
|
|
|
|
// Limited_OverlayCacheNode_By_NodeId_GreaterOrEqual pages through
// overlay_cache_nodes with node_id >= the given value, applying the caller's
// LIMIT/OFFSET. No explicit ORDER BY is emitted, so page ordering follows
// whatever order the database returns for the filter.
func (obj *postgresImpl) Limited_OverlayCacheNode_By_NodeId_GreaterOrEqual(ctx context.Context,
	overlay_cache_node_node_id_greater_or_equal OverlayCacheNode_NodeId_Field,
	limit int, offset int64) (
	rows []*OverlayCacheNode, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id >= ? LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, overlay_cache_node_node_id_greater_or_equal.value())

	// LIMIT/OFFSET bind last, matching placeholder order in the statement.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		overlay_cache_node := &OverlayCacheNode{}
		err = __rows.Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, overlay_cache_node)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// First_Injuredsegment returns one injuredsegments row (LIMIT 1 OFFSET 0),
// or (nil, nil) when the table is empty — absence is not an error here.
func (obj *postgresImpl) First_Injuredsegment(ctx context.Context) (
	injuredsegment *Injuredsegment, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT injuredsegments.id, injuredsegments.info FROM injuredsegments LIMIT 1 OFFSET 0")

	var __values []interface{}
	// No WHERE clause: the generator still emits this (no-op) append.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	// Empty result set: distinguish "no rows" (nil, nil) from a real
	// iteration error.
	if !__rows.Next() {
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, nil
	}

	injuredsegment = &Injuredsegment{}
	err = __rows.Scan(&injuredsegment.Id, &injuredsegment.Info)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	return injuredsegment, nil

}
|
|
|
|
// Limited_Injuredsegment pages through the injuredsegments table using the
// caller's LIMIT/OFFSET; no filter and no explicit ordering is applied.
func (obj *postgresImpl) Limited_Injuredsegment(ctx context.Context,
	limit int, offset int64) (
	rows []*Injuredsegment, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT injuredsegments.id, injuredsegments.info FROM injuredsegments LIMIT ? OFFSET ?")

	var __values []interface{}
	// No WHERE clause: the generator still emits this (no-op) append.
	__values = append(__values)

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		injuredsegment := &Injuredsegment{}
		err = __rows.Scan(&injuredsegment.Id, &injuredsegment.Info)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, injuredsegment)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_User_By_Email_And_Status_Not_Number fetches the single users row with
// the given email and status != 0. The generated SQL uses LIMIT 2 so the
// method can detect ambiguity: zero rows yields a wrapped sql.ErrNoRows,
// and two matching rows yields a tooManyRows error.
func (obj *postgresImpl) Get_User_By_Email_And_Status_Not_Number(ctx context.Context,
	user_email User_Email_Field) (
	user *User, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at FROM users WHERE users.email = ? AND users.status != 0 LIMIT 2")

	var __values []interface{}
	__values = append(__values, user_email.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	// Zero matches: report as a (wrapped) sql.ErrNoRows unless iteration
	// itself failed.
	if !__rows.Next() {
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, makeErr(sql.ErrNoRows)
	}

	user = &User{}
	err = __rows.Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the "Get exactly one" contract is violated.
	if __rows.Next() {
		return nil, tooManyRows("User_By_Email_And_Status_Not_Number")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return user, nil

}
|
|
|
|
// Get_User_By_Id fetches the single users row matching the given primary key.
// A missing row surfaces as a wrapped sql.ErrNoRows via obj.makeErr.
func (obj *postgresImpl) Get_User_By_Id(ctx context.Context,
	user_id User_Id_Field) (
	user *User, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at FROM users WHERE users.id = ?")

	var __values []interface{}
	__values = append(__values, user_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// All_Project returns every row of the projects table, unfiltered and
// unordered.
func (obj *postgresImpl) All_Project(ctx context.Context) (
	rows []*Project, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects")

	var __values []interface{}
	// No WHERE clause: the generator still emits this (no-op) append.
	__values = append(__values)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Project_By_Id fetches the single projects row matching the given
// primary key. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Project_By_Id(ctx context.Context,
	project_id Project_Id_Field) (
	project *Project, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE projects.id = ?")

	var __values []interface{}
	__values = append(__values, project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name returns all
// projects the given member belongs to (via a JOIN on project_members),
// ordered ascending by project name.
func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field) (
	rows []*Project, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project := &Project{}
		err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// All_ProjectMember_By_MemberId returns every project_members row for the
// given member id, unordered.
func (obj *postgresImpl) All_ProjectMember_By_MemberId(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field) (
	rows []*ProjectMember, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?")

	var __values []interface{}
	__values = append(__values, project_member_member_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_member := &ProjectMember{}
		err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_member)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_ProjectMember_By_ProjectId pages through project_members rows for
// the given project, applying the caller's LIMIT/OFFSET. No explicit ORDER BY
// is emitted, so page ordering follows the database's natural order.
func (obj *postgresImpl) Limited_ProjectMember_By_ProjectId(ctx context.Context,
	project_member_project_id ProjectMember_ProjectId_Field,
	limit int, offset int64) (
	rows []*ProjectMember, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.project_id = ? LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, project_member_project_id.value())

	// LIMIT/OFFSET bind last, matching placeholder order in the statement.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		project_member := &ProjectMember{}
		err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, project_member)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_ApiKey_By_Id fetches the single api_keys row matching the given
// primary key. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_ApiKey_By_Id(ctx context.Context,
	api_key_id ApiKey_Id_Field) (
	api_key *ApiKey, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")

	var __values []interface{}
	__values = append(__values, api_key_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// Get_ApiKey_By_Key fetches the single api_keys row whose key column matches
// the given value. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_ApiKey_By_Key(ctx context.Context,
	api_key_key ApiKey_Key_Field) (
	api_key *ApiKey, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.key = ?")

	var __values []interface{}
	__values = append(__values, api_key_key.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// All_ApiKey_By_ProjectId_OrderBy_Asc_Name returns every api_keys row for the
// given project, ordered ascending by key name.
func (obj *postgresImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	api_key_project_id ApiKey_ProjectId_Field) (
	rows []*ApiKey, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name")

	var __values []interface{}
	__values = append(__values, api_key_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		api_key := &ApiKey{}
		err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, api_key)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_BucketUsage_By_Id fetches the single bucket_usages row matching the
// given primary key. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_BucketUsage_By_Id(ctx context.Context,
	bucket_usage_id BucketUsage_Id_Field) (
	bucket_usage *BucketUsage, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.id = ?")

	var __values []interface{}
	__values = append(__values, bucket_usage_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_usage = &BucketUsage{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_usage, nil

}
|
|
|
|
// Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime
// pages through bucket_usages rows for the given bucket whose rollup_end_time
// lies in the half-open interval (greater, less_or_equal], ordered ascending
// by rollup_end_time, with the caller's LIMIT/OFFSET applied.
func (obj *postgresImpl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
	bucket_usage_bucket_id BucketUsage_BucketId_Field,
	bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
	bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
	limit int, offset int64) (
	rows []*BucketUsage, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value())

	// LIMIT/OFFSET bind last, matching placeholder order in the statement.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_usage := &BucketUsage{}
		err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_usage)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime
// is identical to the _Asc_ variant except that results are ordered
// descending by rollup_end_time (newest rollups first).
func (obj *postgresImpl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
	bucket_usage_bucket_id BucketUsage_BucketId_Field,
	bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
	bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
	limit int, offset int64) (
	rows []*BucketUsage, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time DESC LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value())

	// LIMIT/OFFSET bind last, matching placeholder order in the statement.
	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_usage := &BucketUsage{}
		err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_usage)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Find_SerialNumber_By_SerialNumber fetches the serial_numbers row matching
// the given serial number. Per the dbx "Find" contract it returns (nil, nil)
// when no row matches; the LIMIT 2 in the SQL lets it detect ambiguity and
// return a tooManyRows error if two rows match.
func (obj *postgresImpl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
	serial_number_serial_number SerialNumber_SerialNumber_Field) (
	serial_number *SerialNumber, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE serial_numbers.serial_number = ? LIMIT 2")

	var __values []interface{}
	__values = append(__values, serial_number_serial_number.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	// Zero matches: absence is not an error for Find_*.
	if !__rows.Next() {
		if err := __rows.Err(); err != nil {
			return nil, obj.makeErr(err)
		}
		return nil, nil
	}

	serial_number = &SerialNumber{}
	err = __rows.Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}

	// A second row means the "at most one" contract is violated.
	if __rows.Next() {
		return nil, tooManyRows("SerialNumber_By_SerialNumber")
	}

	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}

	return serial_number, nil

}
|
|
|
|
// Get_CertRecord_By_Id fetches the single certRecords row matching the given
// id. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_CertRecord_By_Id(ctx context.Context,
	certRecord_id CertRecord_Id_Field) (
	certRecord *CertRecord, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ?")

	var __values []interface{}
	__values = append(__values, certRecord_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	certRecord = &CertRecord{}
	// Note the column order: publickey comes before id in the generated schema.
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return certRecord, nil

}
|
|
|
|
// Get_RegistrationToken_By_Secret fetches the single registration_tokens row
// whose secret matches. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_RegistrationToken_By_Secret(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field) (
	registration_token *RegistrationToken, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")

	var __values []interface{}
	__values = append(__values, registration_token_secret.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
// Get_RegistrationToken_By_OwnerId fetches the single registration_tokens row
// for the given (nullable) owner id. The condition is built at runtime: when
// the field is NULL the rendered SQL compares with IS NULL instead of binding
// a parameter. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_RegistrationToken_By_OwnerId(ctx context.Context,
	registration_token_owner_id RegistrationToken_OwnerId_Field) (
	registration_token *RegistrationToken, err error) {

	// NULL-aware equality condition; Null starts true and is cleared below
	// if the caller supplied a non-NULL owner id.
	var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}}

	var __values []interface{}
	__values = append(__values)

	// Only bind the owner id when it is non-NULL; otherwise the condition
	// renders as "owner_id IS NULL" with no placeholder.
	if !registration_token_owner_id.isnull() {
		__cond_0.Null = false
		__values = append(__values, registration_token_owner_id.value())
	}

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
func (obj *postgresImpl) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ? RETURNING irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Segmentdetail._set {
|
|
__values = append(__values, update.Segmentdetail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
|
|
}
|
|
|
|
if update.PiecesLostCount._set {
|
|
__values = append(__values, update.PiecesLostCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
|
|
}
|
|
|
|
if update.SegDamagedUnixSec._set {
|
|
__values = append(__values, update.SegDamagedUnixSec.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
|
|
}
|
|
|
|
if update.RepairAttemptCount._set {
|
|
__values = append(__values, update.RepairAttemptCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, irreparabledb_segmentpath.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
irreparabledb = &Irreparabledb{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return irreparabledb, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
accounting_timestamps *AccountingTimestamps, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ? RETURNING accounting_timestamps.name, accounting_timestamps.value")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, accounting_timestamps_name.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
accounting_timestamps = &AccountingTimestamps{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_timestamps.Name, &accounting_timestamps.Value)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return accounting_timestamps, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalAuditCount._set {
|
|
__values = append(__values, update.TotalAuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessRatio._set {
|
|
__values = append(__values, update.AuditSuccessRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_ratio = ?"))
|
|
}
|
|
|
|
if update.UptimeSuccessCount._set {
|
|
__values = append(__values, update.UptimeSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalUptimeCount._set {
|
|
__values = append(__values, update.TotalUptimeCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
|
|
}
|
|
|
|
if update.UptimeRatio._set {
|
|
__values = append(__values, update.UptimeRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_ratio = ?"))
|
|
}
|
|
|
|
if update.Wallet._set {
|
|
__values = append(__values, update.Wallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE overlay_cache_nodes SET "), __sets, __sqlbundle_Literal(" WHERE overlay_cache_nodes.node_id = ? RETURNING overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Address._set {
|
|
__values = append(__values, update.Address.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
|
|
}
|
|
|
|
if update.Protocol._set {
|
|
__values = append(__values, update.Protocol.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
|
|
}
|
|
|
|
if update.OperatorEmail._set {
|
|
__values = append(__values, update.OperatorEmail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("operator_email = ?"))
|
|
}
|
|
|
|
if update.OperatorWallet._set {
|
|
__values = append(__values, update.OperatorWallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("operator_wallet = ?"))
|
|
}
|
|
|
|
if update.FreeBandwidth._set {
|
|
__values = append(__values, update.FreeBandwidth.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?"))
|
|
}
|
|
|
|
if update.FreeDisk._set {
|
|
__values = append(__values, update.FreeDisk.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
|
|
}
|
|
|
|
if update.Latency90._set {
|
|
__values = append(__values, update.Latency90.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessRatio._set {
|
|
__values = append(__values, update.AuditSuccessRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_ratio = ?"))
|
|
}
|
|
|
|
if update.AuditUptimeRatio._set {
|
|
__values = append(__values, update.AuditUptimeRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_uptime_ratio = ?"))
|
|
}
|
|
|
|
if update.AuditCount._set {
|
|
__values = append(__values, update.AuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_count = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.UptimeCount._set {
|
|
__values = append(__values, update.UptimeCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_count = ?"))
|
|
}
|
|
|
|
if update.UptimeSuccessCount._set {
|
|
__values = append(__values, update.UptimeSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, overlay_cache_node_node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.FirstName._set {
|
|
__values = append(__values, update.FirstName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("first_name = ?"))
|
|
}
|
|
|
|
if update.LastName._set {
|
|
__values = append(__values, update.LastName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_name = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.PasswordHash._set {
|
|
__values = append(__values, update.PasswordHash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?"))
|
|
}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, user_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
user = &User{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ? RETURNING projects.id, projects.name, projects.description, projects.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Description._set {
|
|
__values = append(__values, update.Description.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, project_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ? RETURNING api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Name._set {
|
|
__values = append(__values, update.Name.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, api_key_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field,
|
|
update CertRecord_Update_Fields) (
|
|
certRecord *CertRecord, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE certRecords SET "), __sets, __sqlbundle_Literal(" WHERE certRecords.id = ? RETURNING certRecords.publickey, certRecords.id, certRecords.update_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("update_at = ?"))
|
|
|
|
__args = append(__args, certRecord_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
certRecord = &CertRecord{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return certRecord, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ? RETURNING registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.OwnerId._set {
|
|
__values = append(__values, update.OwnerId.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, registration_token_secret.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_rollup_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_raws WHERE accounting_raws.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_raw_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Injuredsegment_By_Id(ctx context.Context,
|
|
injuredsegment_id Injuredsegment_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM injuredsegments WHERE injuredsegments.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, injuredsegment_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? AND project_members.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value(), project_member_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_usages WHERE bucket_usages.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_usage_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
|
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
|
count int64, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM serial_numbers WHERE serial_numbers.expires_at <= ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, serial_number_expires_at_less_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM certRecords WHERE certRecords.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, certRecord_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (impl postgresImpl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(*pq.Error); ok {
|
|
if e.Code.Class() == "23" {
|
|
return e.Constraint, true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.Exec("DELETE FROM used_serials;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM project_members;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM api_keys;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM users;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM storagenode_storage_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM storagenode_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM serial_numbers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM registration_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM projects;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM overlay_cache_nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM injuredsegments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM certRecords;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bwagreements;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_usages;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_storage_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_raws;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
// Create_Irreparabledb inserts a new irreparabledbs row and returns the
// stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_Irreparabledb(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
	irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
	irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
	irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
	irreparabledb *Irreparabledb, err error) {
	// Unwrap each typed field into its raw driver value.
	__segmentpath_val := irreparabledb_segmentpath.value()
	__segmentdetail_val := irreparabledb_segmentdetail.value()
	__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
	__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
	__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)

	__res, err := obj.driver.Exec(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastIrreparabledb(ctx, __pk)
}
|
|
|
|
// Create_AccountingTimestamps inserts a new accounting_timestamps row and
// returns the stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_AccountingTimestamps(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field,
	accounting_timestamps_value AccountingTimestamps_Value_Field) (
	accounting_timestamps *AccountingTimestamps, err error) {
	// Unwrap each typed field into its raw driver value.
	__name_val := accounting_timestamps_name.value()
	__value_val := accounting_timestamps_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_timestamps ( name, value ) VALUES ( ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __name_val, __value_val)

	__res, err := obj.driver.Exec(__stmt, __name_val, __value_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastAccountingTimestamps(ctx, __pk)
}
|
|
|
|
// Create_AccountingRollup inserts a new accounting_rollups row and returns
// the stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_AccountingRollup(ctx context.Context,
	accounting_rollup_node_id AccountingRollup_NodeId_Field,
	accounting_rollup_start_time AccountingRollup_StartTime_Field,
	accounting_rollup_put_total AccountingRollup_PutTotal_Field,
	accounting_rollup_get_total AccountingRollup_GetTotal_Field,
	accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
	accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
	accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
	accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
	accounting_rollup *AccountingRollup, err error) {
	// Unwrap each typed field into its raw driver value.
	__node_id_val := accounting_rollup_node_id.value()
	__start_time_val := accounting_rollup_start_time.value()
	__put_total_val := accounting_rollup_put_total.value()
	__get_total_val := accounting_rollup_get_total.value()
	__get_audit_total_val := accounting_rollup_get_audit_total.value()
	__get_repair_total_val := accounting_rollup_get_repair_total.value()
	__put_repair_total_val := accounting_rollup_put_repair_total.value()
	__at_rest_total_val := accounting_rollup_at_rest_total.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)

	__res, err := obj.driver.Exec(__stmt, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastAccountingRollup(ctx, __pk)
}
|
|
|
|
// Create_AccountingRaw inserts a new accounting_raws row and returns the
// stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_AccountingRaw(ctx context.Context,
	accounting_raw_node_id AccountingRaw_NodeId_Field,
	accounting_raw_interval_end_time AccountingRaw_IntervalEndTime_Field,
	accounting_raw_data_total AccountingRaw_DataTotal_Field,
	accounting_raw_data_type AccountingRaw_DataType_Field,
	accounting_raw_created_at AccountingRaw_CreatedAt_Field) (
	accounting_raw *AccountingRaw, err error) {
	// Unwrap each typed field into its raw driver value. Note that
	// created_at is caller-supplied here, not taken from the clock hook.
	__node_id_val := accounting_raw_node_id.value()
	__interval_end_time_val := accounting_raw_interval_end_time.value()
	__data_total_val := accounting_raw_data_total.value()
	__data_type_val := accounting_raw_data_type.value()
	__created_at_val := accounting_raw_created_at.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_raws ( node_id, interval_end_time, data_total, data_type, created_at ) VALUES ( ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastAccountingRaw(ctx, __pk)
}
|
|
|
|
// Create_Node inserts a new nodes row and returns the stored record,
// re-read via the sqlite rowid of the insert. created_at and updated_at
// are both stamped with the current UTC time from the db clock hook.
func (obj *sqlite3Impl) Create_Node(ctx context.Context,
	node_id Node_Id_Field,
	node_audit_success_count Node_AuditSuccessCount_Field,
	node_total_audit_count Node_TotalAuditCount_Field,
	node_audit_success_ratio Node_AuditSuccessRatio_Field,
	node_uptime_success_count Node_UptimeSuccessCount_Field,
	node_total_uptime_count Node_TotalUptimeCount_Field,
	node_uptime_ratio Node_UptimeRatio_Field,
	node_wallet Node_Wallet_Field,
	node_email Node_Email_Field) (
	node *Node, err error) {

	// Single timestamp so created_at and updated_at match exactly.
	__now := obj.db.Hooks.Now().UTC()
	__id_val := node_id.value()
	__audit_success_count_val := node_audit_success_count.value()
	__total_audit_count_val := node_total_audit_count.value()
	__audit_success_ratio_val := node_audit_success_ratio.value()
	__uptime_success_count_val := node_uptime_success_count.value()
	__total_uptime_count_val := node_total_uptime_count.value()
	__uptime_ratio_val := node_uptime_ratio.value()
	__created_at_val := __now
	__updated_at_val := __now
	__wallet_val := node_wallet.value()
	__email_val := node_email.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at, wallet, email ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __wallet_val, __email_val)

	__res, err := obj.driver.Exec(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __wallet_val, __email_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastNode(ctx, __pk)
}
|
|
|
|
// Create_OverlayCacheNode inserts a new overlay_cache_nodes row and returns
// the stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_OverlayCacheNode(ctx context.Context,
	overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
	overlay_cache_node_node_type OverlayCacheNode_NodeType_Field,
	overlay_cache_node_address OverlayCacheNode_Address_Field,
	overlay_cache_node_protocol OverlayCacheNode_Protocol_Field,
	overlay_cache_node_operator_email OverlayCacheNode_OperatorEmail_Field,
	overlay_cache_node_operator_wallet OverlayCacheNode_OperatorWallet_Field,
	overlay_cache_node_free_bandwidth OverlayCacheNode_FreeBandwidth_Field,
	overlay_cache_node_free_disk OverlayCacheNode_FreeDisk_Field,
	overlay_cache_node_latency_90 OverlayCacheNode_Latency90_Field,
	overlay_cache_node_audit_success_ratio OverlayCacheNode_AuditSuccessRatio_Field,
	overlay_cache_node_audit_uptime_ratio OverlayCacheNode_AuditUptimeRatio_Field,
	overlay_cache_node_audit_count OverlayCacheNode_AuditCount_Field,
	overlay_cache_node_audit_success_count OverlayCacheNode_AuditSuccessCount_Field,
	overlay_cache_node_uptime_count OverlayCacheNode_UptimeCount_Field,
	overlay_cache_node_uptime_success_count OverlayCacheNode_UptimeSuccessCount_Field) (
	overlay_cache_node *OverlayCacheNode, err error) {
	// Unwrap each typed field into its raw driver value.
	__node_id_val := overlay_cache_node_node_id.value()
	__node_type_val := overlay_cache_node_node_type.value()
	__address_val := overlay_cache_node_address.value()
	__protocol_val := overlay_cache_node_protocol.value()
	__operator_email_val := overlay_cache_node_operator_email.value()
	__operator_wallet_val := overlay_cache_node_operator_wallet.value()
	__free_bandwidth_val := overlay_cache_node_free_bandwidth.value()
	__free_disk_val := overlay_cache_node_free_disk.value()
	__latency_90_val := overlay_cache_node_latency_90.value()
	__audit_success_ratio_val := overlay_cache_node_audit_success_ratio.value()
	__audit_uptime_ratio_val := overlay_cache_node_audit_uptime_ratio.value()
	__audit_count_val := overlay_cache_node_audit_count.value()
	__audit_success_count_val := overlay_cache_node_audit_success_count.value()
	__uptime_count_val := overlay_cache_node_uptime_count.value()
	__uptime_success_count_val := overlay_cache_node_uptime_success_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO overlay_cache_nodes ( node_id, node_type, address, protocol, operator_email, operator_wallet, free_bandwidth, free_disk, latency_90, audit_success_ratio, audit_uptime_ratio, audit_count, audit_success_count, uptime_count, uptime_success_count ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __node_type_val, __address_val, __protocol_val, __operator_email_val, __operator_wallet_val, __free_bandwidth_val, __free_disk_val, __latency_90_val, __audit_success_ratio_val, __audit_uptime_ratio_val, __audit_count_val, __audit_success_count_val, __uptime_count_val, __uptime_success_count_val)

	__res, err := obj.driver.Exec(__stmt, __node_id_val, __node_type_val, __address_val, __protocol_val, __operator_email_val, __operator_wallet_val, __free_bandwidth_val, __free_disk_val, __latency_90_val, __audit_success_ratio_val, __audit_uptime_ratio_val, __audit_count_val, __audit_success_count_val, __uptime_count_val, __uptime_success_count_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastOverlayCacheNode(ctx, __pk)
}
|
|
|
|
// Create_Injuredsegment inserts a new injuredsegments row and returns the
// stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_Injuredsegment(ctx context.Context,
	injuredsegment_info Injuredsegment_Info_Field) (
	injuredsegment *Injuredsegment, err error) {
	__info_val := injuredsegment_info.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO injuredsegments ( info ) VALUES ( ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __info_val)

	__res, err := obj.driver.Exec(__stmt, __info_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastInjuredsegment(ctx, __pk)
}
|
|
|
|
// Create_User inserts a new users row and returns the stored record,
// re-read via the sqlite rowid of the insert. status is initialized to 0
// and created_at is stamped from the db clock hook.
func (obj *sqlite3Impl) Create_User(ctx context.Context,
	user_id User_Id_Field,
	user_first_name User_FirstName_Field,
	user_last_name User_LastName_Field,
	user_email User_Email_Field,
	user_password_hash User_PasswordHash_Field) (
	user *User, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := user_id.value()
	__first_name_val := user_first_name.value()
	__last_name_val := user_last_name.value()
	__email_val := user_email.value()
	__password_hash_val := user_password_hash.value()
	// New users always start with status 0 (generated default).
	__status_val := int(0)
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO users ( id, first_name, last_name, email, password_hash, status, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __first_name_val, __last_name_val, __email_val, __password_hash_val, __status_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __id_val, __first_name_val, __last_name_val, __email_val, __password_hash_val, __status_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastUser(ctx, __pk)
}
|
|
|
|
// Create_Project inserts a new projects row and returns the stored record,
// re-read via the sqlite rowid of the insert. created_at is stamped from
// the db clock hook.
func (obj *sqlite3Impl) Create_Project(ctx context.Context,
	project_id Project_Id_Field,
	project_name Project_Name_Field,
	project_description Project_Description_Field) (
	project *Project, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := project_id.value()
	__name_val := project_name.value()
	__description_val := project_description.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO projects ( id, name, description, created_at ) VALUES ( ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __name_val, __description_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __id_val, __name_val, __description_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastProject(ctx, __pk)
}
|
|
|
|
// Create_ProjectMember inserts a new project_members row linking a member
// to a project and returns the stored record, re-read via the sqlite rowid
// of the insert. created_at is stamped from the db clock hook.
func (obj *sqlite3Impl) Create_ProjectMember(ctx context.Context,
	project_member_member_id ProjectMember_MemberId_Field,
	project_member_project_id ProjectMember_ProjectId_Field) (
	project_member *ProjectMember, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__member_id_val := project_member_member_id.value()
	__project_id_val := project_member_project_id.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_members ( member_id, project_id, created_at ) VALUES ( ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __member_id_val, __project_id_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __member_id_val, __project_id_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastProjectMember(ctx, __pk)
}
|
|
|
|
// Create_ApiKey inserts a new api_keys row and returns the stored record,
// re-read via the sqlite rowid of the insert. created_at is stamped from
// the db clock hook.
func (obj *sqlite3Impl) Create_ApiKey(ctx context.Context,
	api_key_id ApiKey_Id_Field,
	api_key_project_id ApiKey_ProjectId_Field,
	api_key_key ApiKey_Key_Field,
	api_key_name ApiKey_Name_Field) (
	api_key *ApiKey, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := api_key_id.value()
	__project_id_val := api_key_project_id.value()
	__key_val := api_key_key.value()
	__name_val := api_key_name.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, key, name, created_at ) VALUES ( ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __project_id_val, __key_val, __name_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __id_val, __project_id_val, __key_val, __name_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastApiKey(ctx, __pk)
}
|
|
|
|
// Create_BucketUsage inserts a new bucket_usages rollup row and returns the
// stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_BucketUsage(ctx context.Context,
	bucket_usage_id BucketUsage_Id_Field,
	bucket_usage_bucket_id BucketUsage_BucketId_Field,
	bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
	bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
	bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
	bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
	bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
	bucket_usage_objects BucketUsage_Objects_Field,
	bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
	bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
	bucket_usage_get_egress BucketUsage_GetEgress_Field,
	bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
	bucket_usage *BucketUsage, err error) {
	// Unwrap each typed field into its raw driver value.
	__id_val := bucket_usage_id.value()
	__bucket_id_val := bucket_usage_bucket_id.value()
	__rollup_end_time_val := bucket_usage_rollup_end_time.value()
	__remote_stored_data_val := bucket_usage_remote_stored_data.value()
	__inline_stored_data_val := bucket_usage_inline_stored_data.value()
	__remote_segments_val := bucket_usage_remote_segments.value()
	__inline_segments_val := bucket_usage_inline_segments.value()
	__objects_val := bucket_usage_objects.value()
	__metadata_size_val := bucket_usage_metadata_size.value()
	__repair_egress_val := bucket_usage_repair_egress.value()
	__get_egress_val := bucket_usage_get_egress.value()
	__audit_egress_val := bucket_usage_audit_egress.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_usages ( id, bucket_id, rollup_end_time, remote_stored_data, inline_stored_data, remote_segments, inline_segments, objects, metadata_size, repair_egress, get_egress, audit_egress ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val)

	__res, err := obj.driver.Exec(__stmt, __id_val, __bucket_id_val, __rollup_end_time_val, __remote_stored_data_val, __inline_stored_data_val, __remote_segments_val, __inline_segments_val, __objects_val, __metadata_size_val, __repair_egress_val, __get_egress_val, __audit_egress_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastBucketUsage(ctx, __pk)
}
|
|
|
|
// Create_SerialNumber inserts a new serial_numbers row and returns the
// stored record, re-read via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_SerialNumber(ctx context.Context,
	serial_number_serial_number SerialNumber_SerialNumber_Field,
	serial_number_bucket_id SerialNumber_BucketId_Field,
	serial_number_expires_at SerialNumber_ExpiresAt_Field) (
	serial_number *SerialNumber, err error) {
	__serial_number_val := serial_number_serial_number.value()
	__bucket_id_val := serial_number_bucket_id.value()
	__expires_at_val := serial_number_expires_at.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO serial_numbers ( serial_number, bucket_id, expires_at ) VALUES ( ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)

	__res, err := obj.driver.Exec(__stmt, __serial_number_val, __bucket_id_val, __expires_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastSerialNumber(ctx, __pk)
}
|
|
|
|
// Create_UsedSerial inserts a new used_serials row marking a serial number
// as consumed by a storage node, and returns the stored record, re-read
// via the sqlite rowid of the insert.
func (obj *sqlite3Impl) Create_UsedSerial(ctx context.Context,
	used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
	used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
	used_serial *UsedSerial, err error) {
	__serial_number_id_val := used_serial_serial_number_id.value()
	__storage_node_id_val := used_serial_storage_node_id.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO used_serials ( serial_number_id, storage_node_id ) VALUES ( ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __serial_number_id_val, __storage_node_id_val)

	__res, err := obj.driver.Exec(__stmt, __serial_number_id_val, __storage_node_id_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastUsedSerial(ctx, __pk)
}
|
|
|
|
// Create_CertRecord inserts a new certRecords row and returns the stored
// record, re-read via the sqlite rowid of the insert. update_at is stamped
// from the db clock hook.
func (obj *sqlite3Impl) Create_CertRecord(ctx context.Context,
	certRecord_publickey CertRecord_Publickey_Field,
	certRecord_id CertRecord_Id_Field) (
	certRecord *CertRecord, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__publickey_val := certRecord_publickey.value()
	__id_val := certRecord_id.value()
	__update_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO certRecords ( publickey, id, update_at ) VALUES ( ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __publickey_val, __id_val, __update_at_val)

	__res, err := obj.driver.Exec(__stmt, __publickey_val, __id_val, __update_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastCertRecord(ctx, __pk)
}
|
|
|
|
// Create_RegistrationToken inserts a new registration_tokens row and
// returns the stored record, re-read via the sqlite rowid of the insert.
// owner_id comes from the optional create fields (may be a NULL value);
// created_at is stamped from the db clock hook.
func (obj *sqlite3Impl) Create_RegistrationToken(ctx context.Context,
	registration_token_secret RegistrationToken_Secret_Field,
	registration_token_project_limit RegistrationToken_ProjectLimit_Field,
	optional RegistrationToken_Create_Fields) (
	registration_token *RegistrationToken, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__secret_val := registration_token_secret.value()
	__owner_id_val := optional.OwnerId.value()
	__project_limit_val := registration_token_project_limit.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO registration_tokens ( secret, owner_id, project_limit, created_at ) VALUES ( ?, ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __secret_val, __owner_id_val, __project_limit_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	// sqlite has no RETURNING clause; fetch the new row by its rowid.
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastRegistrationToken(ctx, __pk)
}
|
|
|
|
// Get_Irreparabledb_By_Segmentpath fetches the single irreparabledbs row
// keyed by segmentpath. A missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *sqlite3Impl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil
}
|
|
|
|
// Limited_Irreparabledb_OrderBy_Asc_Segmentpath returns a page of
// irreparabledbs rows ordered by segmentpath ascending, applying the given
// LIMIT and OFFSET.
func (obj *sqlite3Impl) Limited_Irreparabledb_OrderBy_Asc_Segmentpath(ctx context.Context,
	limit int, offset int64) (
	rows []*Irreparabledb, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs ORDER BY irreparabledbs.segmentpath LIMIT ? OFFSET ?")

	var __values []interface{}
	// Generated no-op: this query has no WHERE-clause bind values.
	__values = append(__values)

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		irreparabledb := &Irreparabledb{}
		err = __rows.Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, irreparabledb)
	}
	// Surface any iteration error that terminated the loop.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil
}
|
|
|
|
// Find_AccountingTimestamps_Value_By_Name looks up the value column of the
// accounting_timestamps row with the given name. Unlike Get_* methods, a
// missing row is not an error: it returns (nil, nil).
func (obj *sqlite3Impl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
	accounting_timestamps_name AccountingTimestamps_Name_Field) (
	row *Value_Row, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")

	var __values []interface{}
	__values = append(__values, accounting_timestamps_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Value_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value)
	// "Find" semantics: absence of a row is a nil result, not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return row, nil
}
|
|
|
|
func (obj *sqlite3Impl) Get_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
accounting_rollup *AccountingRollup, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_rollup_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
accounting_rollup = &AccountingRollup{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return accounting_rollup, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
|
|
rows []*AccountingRollup, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_rollup_start_time_greater_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
accounting_rollup := &AccountingRollup{}
|
|
err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, accounting_rollup)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
accounting_raw *AccountingRaw, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws WHERE accounting_raws.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_raw_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
accounting_raw = &AccountingRaw{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return accounting_raw, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_AccountingRaw(ctx context.Context) (
|
|
rows []*AccountingRaw, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
accounting_raw := &AccountingRaw{}
|
|
err = __rows.Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, accounting_raw)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_raw_interval_end_time_greater_or_equal AccountingRaw_IntervalEndTime_Field) (
|
|
rows []*AccountingRaw, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws WHERE accounting_raws.interval_end_time >= ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_raw_interval_end_time_greater_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
accounting_raw := &AccountingRaw{}
|
|
err = __rows.Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, accounting_raw)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Find_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id FROM nodes")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
row := &Id_Row{}
|
|
err = __rows.Scan(&row.Id)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, row)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_OverlayCacheNode_By_NodeId_GreaterOrEqual(ctx context.Context,
|
|
overlay_cache_node_node_id_greater_or_equal OverlayCacheNode_NodeId_Field,
|
|
limit int, offset int64) (
|
|
rows []*OverlayCacheNode, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id >= ? LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_node_id_greater_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
overlay_cache_node := &OverlayCacheNode{}
|
|
err = __rows.Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, overlay_cache_node)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) First_Injuredsegment(ctx context.Context) (
|
|
injuredsegment *Injuredsegment, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT injuredsegments.id, injuredsegments.info FROM injuredsegments LIMIT 1 OFFSET 0")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
if !__rows.Next() {
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return nil, nil
|
|
}
|
|
|
|
injuredsegment = &Injuredsegment{}
|
|
err = __rows.Scan(&injuredsegment.Id, &injuredsegment.Info)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
return injuredsegment, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_Injuredsegment(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Injuredsegment, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT injuredsegments.id, injuredsegments.info FROM injuredsegments LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
injuredsegment := &Injuredsegment{}
|
|
err = __rows.Scan(&injuredsegment.Id, &injuredsegment.Info)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, injuredsegment)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_User_By_Email_And_Status_Not_Number(ctx context.Context,
|
|
user_email User_Email_Field) (
|
|
user *User, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at FROM users WHERE users.email = ? AND users.status != 0 LIMIT 2")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_email.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
if !__rows.Next() {
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return nil, makeErr(sql.ErrNoRows)
|
|
}
|
|
|
|
user = &User{}
|
|
err = __rows.Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
if __rows.Next() {
|
|
return nil, tooManyRows("User_By_Email_And_Status_Not_Number")
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
return user, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at FROM users WHERE users.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
user = &User{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Project(ctx context.Context) (
|
|
rows []*Project, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects JOIN project_members ON projects.id = project_members.project_id WHERE project_members.member_id = ? ORDER BY projects.name")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project := &Project{}
|
|
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, project)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.member_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project_member := &ProjectMember{}
|
|
err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, project_member)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_ProjectMember_By_ProjectId(ctx context.Context,
|
|
project_member_project_id ProjectMember_ProjectId_Field,
|
|
limit int, offset int64) (
|
|
rows []*ProjectMember, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE project_members.project_id = ? LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_project_id.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
project_member := &ProjectMember{}
|
|
err = __rows.Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, project_member)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_ApiKey_By_Key(ctx context.Context,
|
|
api_key_key ApiKey_Key_Field) (
|
|
api_key *ApiKey, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.key = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_key.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
rows []*ApiKey, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.project_id = ? ORDER BY api_keys.name")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
api_key := &ApiKey{}
|
|
err = __rows.Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, api_key)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
bucket_usage *BucketUsage, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_usage_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
bucket_usage = &BucketUsage{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bucket_usage, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketUsage, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_usage := &BucketUsage{}
|
|
err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bucket_usage)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketUsage, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE bucket_usages.bucket_id = ? AND bucket_usages.rollup_end_time > ? AND bucket_usages.rollup_end_time <= ? ORDER BY bucket_usages.rollup_end_time DESC LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_usage_bucket_id.value(), bucket_usage_rollup_end_time_greater.value(), bucket_usage_rollup_end_time_less_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bucket_usage := &BucketUsage{}
|
|
err = __rows.Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bucket_usage)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Find_SerialNumber_By_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field) (
|
|
serial_number *SerialNumber, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE serial_numbers.serial_number = ? LIMIT 2")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, serial_number_serial_number.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
if !__rows.Next() {
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return nil, nil
|
|
}
|
|
|
|
serial_number = &SerialNumber{}
|
|
err = __rows.Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
if __rows.Next() {
|
|
return nil, tooManyRows("SerialNumber_By_SerialNumber")
|
|
}
|
|
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
return serial_number, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
certRecord *CertRecord, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, certRecord_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
certRecord = &CertRecord{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return certRecord, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, registration_token_secret.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
|
|
var __cond_0 = &__sqlbundle_Condition{Left: "registration_tokens.owner_id", Equal: true, Right: "?", Null: true}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE "), __cond_0}}
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
if !registration_token_owner_id.isnull() {
|
|
__cond_0.Null = false
|
|
__values = append(__values, registration_token_owner_id.value())
|
|
}
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Segmentdetail._set {
|
|
__values = append(__values, update.Segmentdetail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
|
|
}
|
|
|
|
if update.PiecesLostCount._set {
|
|
__values = append(__values, update.PiecesLostCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
|
|
}
|
|
|
|
if update.SegDamagedUnixSec._set {
|
|
__values = append(__values, update.SegDamagedUnixSec.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
|
|
}
|
|
|
|
if update.RepairAttemptCount._set {
|
|
__values = append(__values, update.RepairAttemptCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, irreparabledb_segmentpath.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
irreparabledb = &Irreparabledb{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return irreparabledb, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
accounting_timestamps *AccountingTimestamps, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE accounting_timestamps SET "), __sets, __sqlbundle_Literal(" WHERE accounting_timestamps.name = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, accounting_timestamps_name.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
accounting_timestamps = &AccountingTimestamps{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT accounting_timestamps.name, accounting_timestamps.value FROM accounting_timestamps WHERE accounting_timestamps.name = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&accounting_timestamps.Name, &accounting_timestamps.Value)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return accounting_timestamps, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalAuditCount._set {
|
|
__values = append(__values, update.TotalAuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessRatio._set {
|
|
__values = append(__values, update.AuditSuccessRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_ratio = ?"))
|
|
}
|
|
|
|
if update.UptimeSuccessCount._set {
|
|
__values = append(__values, update.UptimeSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalUptimeCount._set {
|
|
__values = append(__values, update.TotalUptimeCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
|
|
}
|
|
|
|
if update.UptimeRatio._set {
|
|
__values = append(__values, update.UptimeRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_ratio = ?"))
|
|
}
|
|
|
|
if update.Wallet._set {
|
|
__values = append(__values, update.Wallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE overlay_cache_nodes SET "), __sets, __sqlbundle_Literal(" WHERE overlay_cache_nodes.node_id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Address._set {
|
|
__values = append(__values, update.Address.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("address = ?"))
|
|
}
|
|
|
|
if update.Protocol._set {
|
|
__values = append(__values, update.Protocol.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
|
|
}
|
|
|
|
if update.OperatorEmail._set {
|
|
__values = append(__values, update.OperatorEmail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("operator_email = ?"))
|
|
}
|
|
|
|
if update.OperatorWallet._set {
|
|
__values = append(__values, update.OperatorWallet.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("operator_wallet = ?"))
|
|
}
|
|
|
|
if update.FreeBandwidth._set {
|
|
__values = append(__values, update.FreeBandwidth.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_bandwidth = ?"))
|
|
}
|
|
|
|
if update.FreeDisk._set {
|
|
__values = append(__values, update.FreeDisk.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
|
|
}
|
|
|
|
if update.Latency90._set {
|
|
__values = append(__values, update.Latency90.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("latency_90 = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessRatio._set {
|
|
__values = append(__values, update.AuditSuccessRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_ratio = ?"))
|
|
}
|
|
|
|
if update.AuditUptimeRatio._set {
|
|
__values = append(__values, update.AuditUptimeRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_uptime_ratio = ?"))
|
|
}
|
|
|
|
if update.AuditCount._set {
|
|
__values = append(__values, update.AuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_count = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.UptimeCount._set {
|
|
__values = append(__values, update.UptimeCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_count = ?"))
|
|
}
|
|
|
|
if update.UptimeSuccessCount._set {
|
|
__values = append(__values, update.UptimeSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, overlay_cache_node_node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.FirstName._set {
|
|
__values = append(__values, update.FirstName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("first_name = ?"))
|
|
}
|
|
|
|
if update.LastName._set {
|
|
__values = append(__values, update.LastName.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_name = ?"))
|
|
}
|
|
|
|
if update.Email._set {
|
|
__values = append(__values, update.Email.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("email = ?"))
|
|
}
|
|
|
|
if update.PasswordHash._set {
|
|
__values = append(__values, update.PasswordHash.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("password_hash = ?"))
|
|
}
|
|
|
|
if update.Status._set {
|
|
__values = append(__values, update.Status.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("status = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, user_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
user = &User{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at FROM users WHERE users.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return user, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE projects SET "), __sets, __sqlbundle_Literal(" WHERE projects.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Description._set {
|
|
__values = append(__values, update.Description.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("description = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, project_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
project = &Project{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE projects.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return project, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE api_keys SET "), __sets, __sqlbundle_Literal(" WHERE api_keys.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Name._set {
|
|
__values = append(__values, update.Name.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("name = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, api_key_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
api_key = &ApiKey{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return api_key, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field,
|
|
update CertRecord_Update_Fields) (
|
|
certRecord *CertRecord, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE certRecords SET "), __sets, __sqlbundle_Literal(" WHERE certRecords.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("update_at = ?"))
|
|
|
|
__args = append(__args, certRecord_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
certRecord = &CertRecord{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE certRecords.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return certRecord, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE registration_tokens SET "), __sets, __sqlbundle_Literal(" WHERE registration_tokens.secret = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.OwnerId._set {
|
|
__values = append(__values, update.OwnerId.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("owner_id = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, registration_token_secret.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
registration_token = &RegistrationToken{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE registration_tokens.secret = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(®istration_token.Secret, ®istration_token.OwnerId, ®istration_token.ProjectLimit, ®istration_token.CreatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return registration_token, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_rollup_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_raws WHERE accounting_raws.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, accounting_raw_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM overlay_cache_nodes WHERE overlay_cache_nodes.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Injuredsegment_By_Id(ctx context.Context,
|
|
injuredsegment_id Injuredsegment_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM injuredsegments WHERE injuredsegments.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, injuredsegment_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM users WHERE users.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, user_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM projects WHERE projects.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM project_members WHERE project_members.member_id = ? AND project_members.project_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, project_member_member_id.value(), project_member_project_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM api_keys WHERE api_keys.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, api_key_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_usages WHERE bucket_usages.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bucket_usage_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
|
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
|
count int64, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM serial_numbers WHERE serial_numbers.expires_at <= ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, serial_number_expires_at_less_or_equal.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM certRecords WHERE certRecords.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, certRecord_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
// getLastIrreparabledb fetches the irreparabledbs row identified by
// the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastIrreparabledb(ctx context.Context,
	pk int64) (
	irreparabledb *Irreparabledb, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// getLastAccountingTimestamps fetches the accounting_timestamps row
// identified by the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastAccountingTimestamps(ctx context.Context,
	pk int64) (
	accounting_timestamps *AccountingTimestamps, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_timestamps.name, accounting_timestamps.value FROM accounting_timestamps WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	accounting_timestamps = &AccountingTimestamps{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&accounting_timestamps.Name, &accounting_timestamps.Value)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_timestamps, nil

}
|
|
|
|
// getLastAccountingRollup fetches the accounting_rollups row
// identified by the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastAccountingRollup(ctx context.Context,
	pk int64) (
	accounting_rollup *AccountingRollup, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	accounting_rollup = &AccountingRollup{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_rollup, nil

}
|
|
|
|
// getLastAccountingRaw fetches the accounting_raws row identified by
// the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastAccountingRaw(ctx context.Context,
	pk int64) (
	accounting_raw *AccountingRaw, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT accounting_raws.id, accounting_raws.node_id, accounting_raws.interval_end_time, accounting_raws.data_total, accounting_raws.data_type, accounting_raws.created_at FROM accounting_raws WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	accounting_raw = &AccountingRaw{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&accounting_raw.Id, &accounting_raw.NodeId, &accounting_raw.IntervalEndTime, &accounting_raw.DataTotal, &accounting_raw.DataType, &accounting_raw.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return accounting_raw, nil

}
|
|
|
|
// getLastNode fetches the nodes row identified by the given sqlite
// rowid (pk).
func (obj *sqlite3Impl) getLastNode(ctx context.Context,
	pk int64) (
	node *Node, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.wallet, nodes.email FROM nodes WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.Wallet, &node.Email)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// getLastOverlayCacheNode fetches the overlay_cache_nodes row
// identified by the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastOverlayCacheNode(ctx context.Context,
	pk int64) (
	overlay_cache_node *OverlayCacheNode, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.node_id, overlay_cache_nodes.node_type, overlay_cache_nodes.address, overlay_cache_nodes.protocol, overlay_cache_nodes.operator_email, overlay_cache_nodes.operator_wallet, overlay_cache_nodes.free_bandwidth, overlay_cache_nodes.free_disk, overlay_cache_nodes.latency_90, overlay_cache_nodes.audit_success_ratio, overlay_cache_nodes.audit_uptime_ratio, overlay_cache_nodes.audit_count, overlay_cache_nodes.audit_success_count, overlay_cache_nodes.uptime_count, overlay_cache_nodes.uptime_success_count FROM overlay_cache_nodes WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	overlay_cache_node = &OverlayCacheNode{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&overlay_cache_node.NodeId, &overlay_cache_node.NodeType, &overlay_cache_node.Address, &overlay_cache_node.Protocol, &overlay_cache_node.OperatorEmail, &overlay_cache_node.OperatorWallet, &overlay_cache_node.FreeBandwidth, &overlay_cache_node.FreeDisk, &overlay_cache_node.Latency90, &overlay_cache_node.AuditSuccessRatio, &overlay_cache_node.AuditUptimeRatio, &overlay_cache_node.AuditCount, &overlay_cache_node.AuditSuccessCount, &overlay_cache_node.UptimeCount, &overlay_cache_node.UptimeSuccessCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return overlay_cache_node, nil

}
|
|
|
|
// getLastInjuredsegment fetches the injuredsegments row identified by
// the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastInjuredsegment(ctx context.Context,
	pk int64) (
	injuredsegment *Injuredsegment, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT injuredsegments.id, injuredsegments.info FROM injuredsegments WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	injuredsegment = &Injuredsegment{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&injuredsegment.Id, &injuredsegment.Info)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return injuredsegment, nil

}
|
|
|
|
// getLastUser fetches the users row identified by the given sqlite
// rowid (pk).
func (obj *sqlite3Impl) getLastUser(ctx context.Context,
	pk int64) (
	user *User, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.first_name, users.last_name, users.email, users.password_hash, users.status, users.created_at FROM users WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	user = &User{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&user.Id, &user.FirstName, &user.LastName, &user.Email, &user.PasswordHash, &user.Status, &user.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return user, nil

}
|
|
|
|
// getLastProject fetches the projects row identified by the given
// sqlite rowid (pk).
func (obj *sqlite3Impl) getLastProject(ctx context.Context,
	pk int64) (
	project *Project, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.created_at FROM projects WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	project = &Project{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&project.Id, &project.Name, &project.Description, &project.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project, nil

}
|
|
|
|
// getLastProjectMember fetches the project_members row identified by
// the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastProjectMember(ctx context.Context,
	pk int64) (
	project_member *ProjectMember, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT project_members.member_id, project_members.project_id, project_members.created_at FROM project_members WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	project_member = &ProjectMember{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&project_member.MemberId, &project_member.ProjectId, &project_member.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return project_member, nil

}
|
|
|
|
// getLastApiKey fetches the api_keys row identified by the given
// sqlite rowid (pk).
func (obj *sqlite3Impl) getLastApiKey(ctx context.Context,
	pk int64) (
	api_key *ApiKey, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.key, api_keys.name, api_keys.created_at FROM api_keys WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	api_key = &ApiKey{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Key, &api_key.Name, &api_key.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return api_key, nil

}
|
|
|
|
// getLastBucketUsage fetches the bucket_usages row identified by the
// given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastBucketUsage(ctx context.Context,
	pk int64) (
	bucket_usage *BucketUsage, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_usages.id, bucket_usages.bucket_id, bucket_usages.rollup_end_time, bucket_usages.remote_stored_data, bucket_usages.inline_stored_data, bucket_usages.remote_segments, bucket_usages.inline_segments, bucket_usages.objects, bucket_usages.metadata_size, bucket_usages.repair_egress, bucket_usages.get_egress, bucket_usages.audit_egress FROM bucket_usages WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	bucket_usage = &BucketUsage{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_usage.Id, &bucket_usage.BucketId, &bucket_usage.RollupEndTime, &bucket_usage.RemoteStoredData, &bucket_usage.InlineStoredData, &bucket_usage.RemoteSegments, &bucket_usage.InlineSegments, &bucket_usage.Objects, &bucket_usage.MetadataSize, &bucket_usage.RepairEgress, &bucket_usage.GetEgress, &bucket_usage.AuditEgress)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_usage, nil

}
|
|
|
|
// getLastSerialNumber fetches the serial_numbers row identified by the
// given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastSerialNumber(ctx context.Context,
	pk int64) (
	serial_number *SerialNumber, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT serial_numbers.id, serial_numbers.serial_number, serial_numbers.bucket_id, serial_numbers.expires_at FROM serial_numbers WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	serial_number = &SerialNumber{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&serial_number.Id, &serial_number.SerialNumber, &serial_number.BucketId, &serial_number.ExpiresAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return serial_number, nil

}
|
|
|
|
// getLastUsedSerial fetches the used_serials row identified by the
// given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastUsedSerial(ctx context.Context,
	pk int64) (
	used_serial *UsedSerial, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT used_serials.serial_number_id, used_serials.storage_node_id FROM used_serials WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	used_serial = &UsedSerial{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&used_serial.SerialNumberId, &used_serial.StorageNodeId)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return used_serial, nil

}
|
|
|
|
// getLastCertRecord fetches the certRecords row identified by the
// given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastCertRecord(ctx context.Context,
	pk int64) (
	certRecord *CertRecord, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT certRecords.publickey, certRecords.id, certRecords.update_at FROM certRecords WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	certRecord = &CertRecord{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&certRecord.Publickey, &certRecord.Id, &certRecord.UpdateAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return certRecord, nil

}
|
|
|
|
// getLastRegistrationToken fetches the registration_tokens row
// identified by the given sqlite rowid (pk).
func (obj *sqlite3Impl) getLastRegistrationToken(ctx context.Context,
	pk int64) (
	registration_token *RegistrationToken, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT registration_tokens.secret, registration_tokens.owner_id, registration_tokens.project_limit, registration_tokens.created_at FROM registration_tokens WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	registration_token = &RegistrationToken{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&registration_token.Secret, &registration_token.OwnerId, &registration_token.ProjectLimit, &registration_token.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return registration_token, nil

}
|
|
|
|
func (impl sqlite3Impl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(sqlite3.Error); ok {
|
|
if e.Code == sqlite3.ErrConstraint {
|
|
msg := err.Error()
|
|
colon := strings.LastIndex(msg, ":")
|
|
if colon != -1 {
|
|
return strings.TrimSpace(msg[colon:]), true
|
|
}
|
|
return "", true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.Exec("DELETE FROM used_serials;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM project_members;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM api_keys;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM users;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM storagenode_storage_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM storagenode_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM serial_numbers;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM registration_tokens;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM projects;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM overlay_cache_nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM injuredsegments;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM certRecords;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bwagreements;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_usages;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_storage_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bucket_bandwidth_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM accounting_raws;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
// Rx bundles a DB with an optional in-flight transaction. The tx field
// is nil until a transaction is lazily opened (see getTx) and is
// cleared again by Commit/Rollback.
type Rx struct {
	db *DB
	tx *Tx
}
|
|
|
|
func (rx *Rx) UnsafeTx(ctx context.Context) (unsafe_tx *sql.Tx, err error) {
|
|
tx, err := rx.getTx(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return tx.Tx, nil
|
|
}
|
|
|
|
func (rx *Rx) getTx(ctx context.Context) (tx *Tx, err error) {
|
|
if rx.tx == nil {
|
|
if rx.tx, err = rx.db.Open(ctx); err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
return rx.tx, nil
|
|
}
|
|
|
|
func (rx *Rx) Rebind(s string) string {
|
|
return rx.db.Rebind(s)
|
|
}
|
|
|
|
func (rx *Rx) Commit() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Commit()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) Rollback() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Rollback()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) All_AccountingRaw(ctx context.Context) (
|
|
rows []*AccountingRaw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_AccountingRaw(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_raw_interval_end_time_greater_or_equal AccountingRaw_IntervalEndTime_Field) (
|
|
rows []*AccountingRaw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual(ctx, accounting_raw_interval_end_time_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
|
|
rows []*AccountingRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx, accounting_rollup_start_time_greater_or_equal)
|
|
}
|
|
|
|
func (rx *Rx) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
rows []*ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx, api_key_project_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Node_Id(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_Project(ctx context.Context) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_ProjectMember_By_MemberId(ctx, project_member_member_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx, project_member_member_id)
|
|
}
|
|
|
|
func (rx *Rx) Create_AccountingRaw(ctx context.Context,
|
|
accounting_raw_node_id AccountingRaw_NodeId_Field,
|
|
accounting_raw_interval_end_time AccountingRaw_IntervalEndTime_Field,
|
|
accounting_raw_data_total AccountingRaw_DataTotal_Field,
|
|
accounting_raw_data_type AccountingRaw_DataType_Field,
|
|
accounting_raw_created_at AccountingRaw_CreatedAt_Field) (
|
|
accounting_raw *AccountingRaw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_AccountingRaw(ctx, accounting_raw_node_id, accounting_raw_interval_end_time, accounting_raw_data_total, accounting_raw_data_type, accounting_raw_created_at)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_AccountingRollup(ctx context.Context,
|
|
accounting_rollup_node_id AccountingRollup_NodeId_Field,
|
|
accounting_rollup_start_time AccountingRollup_StartTime_Field,
|
|
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
|
|
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
|
|
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
|
|
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
|
|
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
|
|
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
|
|
accounting_rollup *AccountingRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_AccountingRollup(ctx, accounting_rollup_node_id, accounting_rollup_start_time, accounting_rollup_put_total, accounting_rollup_get_total, accounting_rollup_get_audit_total, accounting_rollup_get_repair_total, accounting_rollup_put_repair_total, accounting_rollup_at_rest_total)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
accounting_timestamps *AccountingTimestamps, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_AccountingTimestamps(ctx, accounting_timestamps_name, accounting_timestamps_value)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_key ApiKey_Key_Field,
|
|
api_key_name ApiKey_Name_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ApiKey(ctx, api_key_id, api_key_project_id, api_key_key, api_key_name)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_BucketUsage(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
|
|
bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
|
|
bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
|
|
bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
|
|
bucket_usage_objects BucketUsage_Objects_Field,
|
|
bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
|
|
bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
|
|
bucket_usage_get_egress BucketUsage_GetEgress_Field,
|
|
bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
|
|
bucket_usage *BucketUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_BucketUsage(ctx, bucket_usage_id, bucket_usage_bucket_id, bucket_usage_rollup_end_time, bucket_usage_remote_stored_data, bucket_usage_inline_stored_data, bucket_usage_remote_segments, bucket_usage_inline_segments, bucket_usage_objects, bucket_usage_metadata_size, bucket_usage_repair_egress, bucket_usage_get_egress, bucket_usage_audit_egress)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_CertRecord(ctx context.Context,
|
|
certRecord_publickey CertRecord_Publickey_Field,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
certRecord *CertRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_CertRecord(ctx, certRecord_publickey, certRecord_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Injuredsegment(ctx context.Context,
|
|
injuredsegment_info Injuredsegment_Info_Field) (
|
|
injuredsegment *Injuredsegment, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Injuredsegment(ctx, injuredsegment_info)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Irreparabledb(ctx, irreparabledb_segmentpath, irreparabledb_segmentdetail, irreparabledb_pieces_lost_count, irreparabledb_seg_damaged_unix_sec, irreparabledb_repair_attempt_count)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_audit_success_ratio Node_AuditSuccessRatio_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_uptime_ratio Node_UptimeRatio_Field,
|
|
node_wallet Node_Wallet_Field,
|
|
node_email Node_Email_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Node(ctx, node_id, node_audit_success_count, node_total_audit_count, node_audit_success_ratio, node_uptime_success_count, node_total_uptime_count, node_uptime_ratio, node_wallet, node_email)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_OverlayCacheNode(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
|
|
overlay_cache_node_node_type OverlayCacheNode_NodeType_Field,
|
|
overlay_cache_node_address OverlayCacheNode_Address_Field,
|
|
overlay_cache_node_protocol OverlayCacheNode_Protocol_Field,
|
|
overlay_cache_node_operator_email OverlayCacheNode_OperatorEmail_Field,
|
|
overlay_cache_node_operator_wallet OverlayCacheNode_OperatorWallet_Field,
|
|
overlay_cache_node_free_bandwidth OverlayCacheNode_FreeBandwidth_Field,
|
|
overlay_cache_node_free_disk OverlayCacheNode_FreeDisk_Field,
|
|
overlay_cache_node_latency_90 OverlayCacheNode_Latency90_Field,
|
|
overlay_cache_node_audit_success_ratio OverlayCacheNode_AuditSuccessRatio_Field,
|
|
overlay_cache_node_audit_uptime_ratio OverlayCacheNode_AuditUptimeRatio_Field,
|
|
overlay_cache_node_audit_count OverlayCacheNode_AuditCount_Field,
|
|
overlay_cache_node_audit_success_count OverlayCacheNode_AuditSuccessCount_Field,
|
|
overlay_cache_node_uptime_count OverlayCacheNode_UptimeCount_Field,
|
|
overlay_cache_node_uptime_success_count OverlayCacheNode_UptimeSuccessCount_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_OverlayCacheNode(ctx, overlay_cache_node_node_id, overlay_cache_node_node_type, overlay_cache_node_address, overlay_cache_node_protocol, overlay_cache_node_operator_email, overlay_cache_node_operator_wallet, overlay_cache_node_free_bandwidth, overlay_cache_node_free_disk, overlay_cache_node_latency_90, overlay_cache_node_audit_success_ratio, overlay_cache_node_audit_uptime_ratio, overlay_cache_node_audit_count, overlay_cache_node_audit_success_count, overlay_cache_node_uptime_count, overlay_cache_node_uptime_success_count)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Project(ctx, project_id, project_name, project_description)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_ProjectMember(ctx, project_member_member_id, project_member_project_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_RegistrationToken(ctx, registration_token_secret, registration_token_project_limit, optional)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field,
|
|
serial_number_bucket_id SerialNumber_BucketId_Field,
|
|
serial_number_expires_at SerialNumber_ExpiresAt_Field) (
|
|
serial_number *SerialNumber, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_SerialNumber(ctx, serial_number_serial_number, serial_number_bucket_id, serial_number_expires_at)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_UsedSerial(ctx context.Context,
|
|
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
|
|
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
|
|
used_serial *UsedSerial, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_UsedSerial(ctx, used_serial_serial_number_id, used_serial_storage_node_id)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_first_name User_FirstName_Field,
|
|
user_last_name User_LastName_Field,
|
|
user_email User_Email_Field,
|
|
user_password_hash User_PasswordHash_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_User(ctx, user_id, user_first_name, user_last_name, user_email, user_password_hash)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_AccountingRaw_By_Id(ctx, accounting_raw_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_AccountingRollup_By_Id(ctx, accounting_rollup_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ApiKey_By_Id(ctx, api_key_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_BucketUsage_By_Id(ctx, bucket_usage_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_CertRecord_By_Id(ctx, certRecord_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Injuredsegment_By_Id(ctx context.Context,
|
|
injuredsegment_id Injuredsegment_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Injuredsegment_By_Id(ctx, injuredsegment_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_OverlayCacheNode_By_NodeId(ctx, overlay_cache_node_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_ProjectMember_By_MemberId_And_ProjectId(ctx, project_member_member_id, project_member_project_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Project_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
|
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
|
count int64, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx, serial_number_expires_at_less_or_equal)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_User_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_AccountingTimestamps_Value_By_Name(ctx, accounting_timestamps_name)
|
|
}
|
|
|
|
func (rx *Rx) Find_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Find_SerialNumber_By_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field) (
|
|
serial_number *SerialNumber, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_SerialNumber_By_SerialNumber(ctx, serial_number_serial_number)
|
|
}
|
|
|
|
func (rx *Rx) First_Injuredsegment(ctx context.Context) (
|
|
injuredsegment *Injuredsegment, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.First_Injuredsegment(ctx)
|
|
}
|
|
|
|
func (rx *Rx) Get_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
accounting_raw *AccountingRaw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_AccountingRaw_By_Id(ctx, accounting_raw_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
accounting_rollup *AccountingRollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_AccountingRollup_By_Id(ctx, accounting_rollup_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Id(ctx, api_key_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_ApiKey_By_Key(ctx context.Context,
|
|
api_key_key ApiKey_Key_Field) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_ApiKey_By_Key(ctx, api_key_key)
|
|
}
|
|
|
|
func (rx *Rx) Get_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
bucket_usage *BucketUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_BucketUsage_By_Id(ctx, bucket_usage_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
certRecord *CertRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_CertRecord_By_Id(ctx, certRecord_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_OverlayCacheNode_By_NodeId(ctx, overlay_cache_node_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Project_By_Id(ctx, project_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_RegistrationToken_By_OwnerId(ctx, registration_token_owner_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_RegistrationToken_By_Secret(ctx, registration_token_secret)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_By_Email_And_Status_Not_Number(ctx context.Context,
|
|
user_email User_Email_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_By_Email_And_Status_Not_Number(ctx, user_email)
|
|
}
|
|
|
|
func (rx *Rx) Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_User_By_Id(ctx, user_id)
|
|
}
|
|
|
|
func (rx *Rx) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx, bucket_usage_bucket_id, bucket_usage_rollup_end_time_greater, bucket_usage_rollup_end_time_less_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketUsage, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx, bucket_usage_bucket_id, bucket_usage_rollup_end_time_greater, bucket_usage_rollup_end_time_less_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Injuredsegment(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Injuredsegment, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Injuredsegment(ctx, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Irreparabledb_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Irreparabledb_OrderBy_Asc_Segmentpath(ctx, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_OverlayCacheNode_By_NodeId_GreaterOrEqual(ctx context.Context,
|
|
overlay_cache_node_node_id_greater_or_equal OverlayCacheNode_NodeId_Field,
|
|
limit int, offset int64) (
|
|
rows []*OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_OverlayCacheNode_By_NodeId_GreaterOrEqual(ctx, overlay_cache_node_node_id_greater_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_ProjectMember_By_ProjectId(ctx context.Context,
|
|
project_member_project_id ProjectMember_ProjectId_Field,
|
|
limit int, offset int64) (
|
|
rows []*ProjectMember, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_ProjectMember_By_ProjectId(ctx, project_member_project_id, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Update_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
accounting_timestamps *AccountingTimestamps, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_AccountingTimestamps_By_Name(ctx, accounting_timestamps_name, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
api_key *ApiKey, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_ApiKey_By_Id(ctx, api_key_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field,
|
|
update CertRecord_Update_Fields) (
|
|
certRecord *CertRecord, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_CertRecord_By_Id(ctx, certRecord_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Node_By_Id(ctx, node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_OverlayCacheNode_By_NodeId(ctx, overlay_cache_node_node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Project_By_Id(ctx, project_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_RegistrationToken_By_Secret(ctx, registration_token_secret, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_User_By_Id(ctx, user_id, update)
|
|
}
|
|
|
|
type Methods interface {
|
|
All_AccountingRaw(ctx context.Context) (
|
|
rows []*AccountingRaw, err error)
|
|
|
|
All_AccountingRaw_By_IntervalEndTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_raw_interval_end_time_greater_or_equal AccountingRaw_IntervalEndTime_Field) (
|
|
rows []*AccountingRaw, err error)
|
|
|
|
All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
|
|
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
|
|
rows []*AccountingRollup, err error)
|
|
|
|
All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
|
|
api_key_project_id ApiKey_ProjectId_Field) (
|
|
rows []*ApiKey, err error)
|
|
|
|
All_Node_Id(ctx context.Context) (
|
|
rows []*Id_Row, err error)
|
|
|
|
All_Project(ctx context.Context) (
|
|
rows []*Project, err error)
|
|
|
|
All_ProjectMember_By_MemberId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*ProjectMember, err error)
|
|
|
|
All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field) (
|
|
rows []*Project, err error)
|
|
|
|
Create_AccountingRaw(ctx context.Context,
|
|
accounting_raw_node_id AccountingRaw_NodeId_Field,
|
|
accounting_raw_interval_end_time AccountingRaw_IntervalEndTime_Field,
|
|
accounting_raw_data_total AccountingRaw_DataTotal_Field,
|
|
accounting_raw_data_type AccountingRaw_DataType_Field,
|
|
accounting_raw_created_at AccountingRaw_CreatedAt_Field) (
|
|
accounting_raw *AccountingRaw, err error)
|
|
|
|
Create_AccountingRollup(ctx context.Context,
|
|
accounting_rollup_node_id AccountingRollup_NodeId_Field,
|
|
accounting_rollup_start_time AccountingRollup_StartTime_Field,
|
|
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
|
|
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
|
|
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
|
|
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
|
|
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
|
|
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
|
|
accounting_rollup *AccountingRollup, err error)
|
|
|
|
Create_AccountingTimestamps(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
accounting_timestamps_value AccountingTimestamps_Value_Field) (
|
|
accounting_timestamps *AccountingTimestamps, err error)
|
|
|
|
Create_ApiKey(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
api_key_project_id ApiKey_ProjectId_Field,
|
|
api_key_key ApiKey_Key_Field,
|
|
api_key_name ApiKey_Name_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Create_BucketUsage(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_remote_stored_data BucketUsage_RemoteStoredData_Field,
|
|
bucket_usage_inline_stored_data BucketUsage_InlineStoredData_Field,
|
|
bucket_usage_remote_segments BucketUsage_RemoteSegments_Field,
|
|
bucket_usage_inline_segments BucketUsage_InlineSegments_Field,
|
|
bucket_usage_objects BucketUsage_Objects_Field,
|
|
bucket_usage_metadata_size BucketUsage_MetadataSize_Field,
|
|
bucket_usage_repair_egress BucketUsage_RepairEgress_Field,
|
|
bucket_usage_get_egress BucketUsage_GetEgress_Field,
|
|
bucket_usage_audit_egress BucketUsage_AuditEgress_Field) (
|
|
bucket_usage *BucketUsage, err error)
|
|
|
|
Create_CertRecord(ctx context.Context,
|
|
certRecord_publickey CertRecord_Publickey_Field,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
certRecord *CertRecord, err error)
|
|
|
|
Create_Injuredsegment(ctx context.Context,
|
|
injuredsegment_info Injuredsegment_Info_Field) (
|
|
injuredsegment *Injuredsegment, err error)
|
|
|
|
Create_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
irreparabledb *Irreparabledb, err error)
|
|
|
|
Create_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_audit_success_ratio Node_AuditSuccessRatio_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_uptime_ratio Node_UptimeRatio_Field,
|
|
node_wallet Node_Wallet_Field,
|
|
node_email Node_Email_Field) (
|
|
node *Node, err error)
|
|
|
|
Create_OverlayCacheNode(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
|
|
overlay_cache_node_node_type OverlayCacheNode_NodeType_Field,
|
|
overlay_cache_node_address OverlayCacheNode_Address_Field,
|
|
overlay_cache_node_protocol OverlayCacheNode_Protocol_Field,
|
|
overlay_cache_node_operator_email OverlayCacheNode_OperatorEmail_Field,
|
|
overlay_cache_node_operator_wallet OverlayCacheNode_OperatorWallet_Field,
|
|
overlay_cache_node_free_bandwidth OverlayCacheNode_FreeBandwidth_Field,
|
|
overlay_cache_node_free_disk OverlayCacheNode_FreeDisk_Field,
|
|
overlay_cache_node_latency_90 OverlayCacheNode_Latency90_Field,
|
|
overlay_cache_node_audit_success_ratio OverlayCacheNode_AuditSuccessRatio_Field,
|
|
overlay_cache_node_audit_uptime_ratio OverlayCacheNode_AuditUptimeRatio_Field,
|
|
overlay_cache_node_audit_count OverlayCacheNode_AuditCount_Field,
|
|
overlay_cache_node_audit_success_count OverlayCacheNode_AuditSuccessCount_Field,
|
|
overlay_cache_node_uptime_count OverlayCacheNode_UptimeCount_Field,
|
|
overlay_cache_node_uptime_success_count OverlayCacheNode_UptimeSuccessCount_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error)
|
|
|
|
Create_Project(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
project_name Project_Name_Field,
|
|
project_description Project_Description_Field) (
|
|
project *Project, err error)
|
|
|
|
Create_ProjectMember(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
project_member *ProjectMember, err error)
|
|
|
|
Create_RegistrationToken(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
registration_token_project_limit RegistrationToken_ProjectLimit_Field,
|
|
optional RegistrationToken_Create_Fields) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Create_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field,
|
|
serial_number_bucket_id SerialNumber_BucketId_Field,
|
|
serial_number_expires_at SerialNumber_ExpiresAt_Field) (
|
|
serial_number *SerialNumber, err error)
|
|
|
|
Create_UsedSerial(ctx context.Context,
|
|
used_serial_serial_number_id UsedSerial_SerialNumberId_Field,
|
|
used_serial_storage_node_id UsedSerial_StorageNodeId_Field) (
|
|
used_serial *UsedSerial, err error)
|
|
|
|
Create_User(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
user_first_name User_FirstName_Field,
|
|
user_last_name User_LastName_Field,
|
|
user_email User_Email_Field,
|
|
user_password_hash User_PasswordHash_Field) (
|
|
user *User, err error)
|
|
|
|
Delete_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Injuredsegment_By_Id(ctx context.Context,
|
|
injuredsegment_id Injuredsegment_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context,
|
|
project_member_member_id ProjectMember_MemberId_Field,
|
|
project_member_project_id ProjectMember_ProjectId_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx context.Context,
|
|
serial_number_expires_at_less_or_equal SerialNumber_ExpiresAt_Field) (
|
|
count int64, err error)
|
|
|
|
Delete_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
deleted bool, err error)
|
|
|
|
Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field) (
|
|
row *Value_Row, err error)
|
|
|
|
Find_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error)
|
|
|
|
Find_SerialNumber_By_SerialNumber(ctx context.Context,
|
|
serial_number_serial_number SerialNumber_SerialNumber_Field) (
|
|
serial_number *SerialNumber, err error)
|
|
|
|
First_Injuredsegment(ctx context.Context) (
|
|
injuredsegment *Injuredsegment, err error)
|
|
|
|
Get_AccountingRaw_By_Id(ctx context.Context,
|
|
accounting_raw_id AccountingRaw_Id_Field) (
|
|
accounting_raw *AccountingRaw, err error)
|
|
|
|
Get_AccountingRollup_By_Id(ctx context.Context,
|
|
accounting_rollup_id AccountingRollup_Id_Field) (
|
|
accounting_rollup *AccountingRollup, err error)
|
|
|
|
Get_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_ApiKey_By_Key(ctx context.Context,
|
|
api_key_key ApiKey_Key_Field) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Get_BucketUsage_By_Id(ctx context.Context,
|
|
bucket_usage_id BucketUsage_Id_Field) (
|
|
bucket_usage *BucketUsage, err error)
|
|
|
|
Get_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field) (
|
|
certRecord *CertRecord, err error)
|
|
|
|
Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error)
|
|
|
|
Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error)
|
|
|
|
Get_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error)
|
|
|
|
Get_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field) (
|
|
project *Project, err error)
|
|
|
|
Get_RegistrationToken_By_OwnerId(ctx context.Context,
|
|
registration_token_owner_id RegistrationToken_OwnerId_Field) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Get_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Get_User_By_Email_And_Status_Not_Number(ctx context.Context,
|
|
user_email User_Email_Field) (
|
|
user *User, err error)
|
|
|
|
Get_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field) (
|
|
user *User, err error)
|
|
|
|
Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Asc_RollupEndTime(ctx context.Context,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketUsage, err error)
|
|
|
|
Limited_BucketUsage_By_BucketId_And_RollupEndTime_Greater_And_RollupEndTime_LessOrEqual_OrderBy_Desc_RollupEndTime(ctx context.Context,
|
|
bucket_usage_bucket_id BucketUsage_BucketId_Field,
|
|
bucket_usage_rollup_end_time_greater BucketUsage_RollupEndTime_Field,
|
|
bucket_usage_rollup_end_time_less_or_equal BucketUsage_RollupEndTime_Field,
|
|
limit int, offset int64) (
|
|
rows []*BucketUsage, err error)
|
|
|
|
Limited_Injuredsegment(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Injuredsegment, err error)
|
|
|
|
Limited_Irreparabledb_OrderBy_Asc_Segmentpath(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Irreparabledb, err error)
|
|
|
|
Limited_OverlayCacheNode_By_NodeId_GreaterOrEqual(ctx context.Context,
|
|
overlay_cache_node_node_id_greater_or_equal OverlayCacheNode_NodeId_Field,
|
|
limit int, offset int64) (
|
|
rows []*OverlayCacheNode, err error)
|
|
|
|
Limited_ProjectMember_By_ProjectId(ctx context.Context,
|
|
project_member_project_id ProjectMember_ProjectId_Field,
|
|
limit int, offset int64) (
|
|
rows []*ProjectMember, err error)
|
|
|
|
Update_AccountingTimestamps_By_Name(ctx context.Context,
|
|
accounting_timestamps_name AccountingTimestamps_Name_Field,
|
|
update AccountingTimestamps_Update_Fields) (
|
|
accounting_timestamps *AccountingTimestamps, err error)
|
|
|
|
Update_ApiKey_By_Id(ctx context.Context,
|
|
api_key_id ApiKey_Id_Field,
|
|
update ApiKey_Update_Fields) (
|
|
api_key *ApiKey, err error)
|
|
|
|
Update_CertRecord_By_Id(ctx context.Context,
|
|
certRecord_id CertRecord_Id_Field,
|
|
update CertRecord_Update_Fields) (
|
|
certRecord *CertRecord, err error)
|
|
|
|
Update_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
irreparabledb *Irreparabledb, err error)
|
|
|
|
Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error)
|
|
|
|
Update_OverlayCacheNode_By_NodeId(ctx context.Context,
|
|
overlay_cache_node_node_id OverlayCacheNode_NodeId_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error)
|
|
|
|
Update_Project_By_Id(ctx context.Context,
|
|
project_id Project_Id_Field,
|
|
update Project_Update_Fields) (
|
|
project *Project, err error)
|
|
|
|
Update_RegistrationToken_By_Secret(ctx context.Context,
|
|
registration_token_secret RegistrationToken_Secret_Field,
|
|
update RegistrationToken_Update_Fields) (
|
|
registration_token *RegistrationToken, err error)
|
|
|
|
Update_User_By_Id(ctx context.Context,
|
|
user_id User_Id_Field,
|
|
update User_Update_Fields) (
|
|
user *User, err error)
|
|
}
|
|
|
|
// TxMethods is the exported transaction interface: all generated query
// operations plus transaction lifecycle control.
type TxMethods interface {


	Methods




	Rebind(s string) string


	Commit() error


	Rollback() error


}
|
|
|
|
// txMethods is the package-private transaction interface; it adds the
// internal bulk-delete and error-wrapping helpers to TxMethods.
type txMethods interface {


	TxMethods




	deleteAll(ctx context.Context) (int64, error)


	makeErr(err error) error


}
|
|
|
|
// DBMethods is the exported database-handle interface: all generated query
// operations plus schema access and placeholder rebinding.
type DBMethods interface {


	Methods




	Schema() string


	Rebind(sql string) string


}
|
|
|
|
// dbMethods is the package-private database-handle interface; it adds the
// internal transaction-wrapping and error-wrapping helpers to DBMethods.
type dbMethods interface {


	DBMethods




	wrapTx(tx *sql.Tx) txMethods


	makeErr(err error) error


}
|
|
|
|
// openpostgres opens a database handle using the "postgres" driver
// (registered as a side effect of the github.com/lib/pq import above).
// Note sql.Open does not dial; connection errors surface on first use.
func openpostgres(source string) (*sql.DB, error) {


	return sql.Open("postgres", source)


}
|
|
|
|
// sqlite3DriverName is a process-unique driver name under which init
// registers a sqlite3 driver carrying our connect hook. The random suffix
// avoids a sql.Register panic if another copy of this generated package
// registers a driver in the same process.
var sqlite3DriverName = func() string {
	var id [16]byte
	// math/rand.Read is documented to always succeed; the error is ignored.
	rand.Read(id[:])
	// %x hex-encodes the byte slice directly; no intermediate string copy.
	return fmt.Sprintf("sqlite3_%x", id[:])
}()
|
|
|
|
// init registers the sqlite3 driver under the package-unique name with a
// connect hook that configures every new connection (pragmas; see
// sqlite3SetupConn).
func init() {


	sql.Register(sqlite3DriverName, &sqlite3.SQLiteDriver{


		ConnectHook: sqlite3SetupConn,


	})


}
|
|
|
|
// SQLite3JournalMode controls the journal_mode pragma for all new connections.
// Since it is read without a mutex, it must be changed to the value you want
// before any Open calls.
//
// NOTE(review): the value is interpolated into a PRAGMA statement verbatim
// (see sqlite3SetupConn), so only trusted, fixed mode names belong here.
var SQLite3JournalMode = "WAL"
|
|
|
|
func sqlite3SetupConn(conn *sqlite3.SQLiteConn) (err error) {
|
|
_, err = conn.Exec("PRAGMA foreign_keys = ON", nil)
|
|
if err != nil {
|
|
return makeErr(err)
|
|
}
|
|
_, err = conn.Exec("PRAGMA journal_mode = "+SQLite3JournalMode, nil)
|
|
if err != nil {
|
|
return makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// opensqlite3 opens a database handle using the hook-configured sqlite3
// driver registered in init under the package-unique name.
func opensqlite3(source string) (*sql.DB, error) {


	return sql.Open(sqlite3DriverName, source)


}
|