// Commit a819d819e3: Overlay Cache master db merge * test update * review
// comments * Fix transaction usage * removed unused variable * added better
// limit handling * better error handling
// (file: 4391 lines, 121 KiB, Go)
// AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
|
|
// DO NOT EDIT.
|
|
|
|
package satellitedb
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"database/sql"
|
|
"errors"
|
|
"fmt"
|
|
"reflect"
|
|
"regexp"
|
|
"strconv"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
"unicode"
|
|
|
|
"github.com/lib/pq"
|
|
|
|
"github.com/mattn/go-sqlite3"
|
|
"math/rand"
|
|
)
|
|
|
|
// Prevent conditional imports from causing build failures
|
|
var _ = strconv.Itoa
|
|
var _ = strings.LastIndex
|
|
var _ = fmt.Sprint
|
|
var _ sync.Mutex
|
|
|
|
var (
	// WrapErr is a user-overridable hook through which every error produced
	// by this package is passed before being returned. Defaults to identity.
	WrapErr = func(err *Error) error { return err }
	// Logger, when non-nil, receives printf-style diagnostics (rendered
	// statements and internal failures).
	Logger func(format string, args ...interface{})

	// Sentinel errors wrapped into *Error with the matching ErrorCode.
	errTooManyRows       = errors.New("too many rows")
	errUnsupportedDriver = errors.New("unsupported driver")
	errEmptyUpdate       = errors.New("empty update")
)
|
|
|
|
func logError(format string, args ...interface{}) {
|
|
if Logger != nil {
|
|
Logger(format, args...)
|
|
}
|
|
}
|
|
|
|
// ErrorCode classifies errors produced by this package so callers can
// branch on the failure category instead of matching error strings.
type ErrorCode int

const (
	ErrorCode_Unknown ErrorCode = iota
	ErrorCode_UnsupportedDriver
	ErrorCode_NoRows
	ErrorCode_TxDone
	ErrorCode_TooManyRows
	ErrorCode_ConstraintViolation
	ErrorCode_EmptyUpdate
)
|
|
|
|
// Error is the rich error type returned by this package. Only Err and Code
// are always populated; the remaining fields carry context when available
// (which driver failed, which constraint was violated, or a suffix of the
// offending query).
type Error struct {
	Err         error
	Code        ErrorCode
	Driver      string
	Constraint  string
	QuerySuffix string
}

// Error delegates to the wrapped error's message.
func (e *Error) Error() string {
	return e.Err.Error()
}
|
|
|
|
func wrapErr(e *Error) error {
|
|
if WrapErr == nil {
|
|
return e
|
|
}
|
|
return WrapErr(e)
|
|
}
|
|
|
|
func makeErr(err error) error {
|
|
if err == nil {
|
|
return nil
|
|
}
|
|
e := &Error{Err: err}
|
|
switch err {
|
|
case sql.ErrNoRows:
|
|
e.Code = ErrorCode_NoRows
|
|
case sql.ErrTxDone:
|
|
e.Code = ErrorCode_TxDone
|
|
}
|
|
return wrapErr(e)
|
|
}
|
|
|
|
func unsupportedDriver(driver string) error {
|
|
return wrapErr(&Error{
|
|
Err: errUnsupportedDriver,
|
|
Code: ErrorCode_UnsupportedDriver,
|
|
Driver: driver,
|
|
})
|
|
}
|
|
|
|
func emptyUpdate() error {
|
|
return wrapErr(&Error{
|
|
Err: errEmptyUpdate,
|
|
Code: ErrorCode_EmptyUpdate,
|
|
})
|
|
}
|
|
|
|
func tooManyRows(query_suffix string) error {
|
|
return wrapErr(&Error{
|
|
Err: errTooManyRows,
|
|
Code: ErrorCode_TooManyRows,
|
|
QuerySuffix: query_suffix,
|
|
})
|
|
}
|
|
|
|
func constraintViolation(err error, constraint string) error {
|
|
return wrapErr(&Error{
|
|
Err: err,
|
|
Code: ErrorCode_ConstraintViolation,
|
|
Constraint: constraint,
|
|
})
|
|
}
|
|
|
|
// driver is the subset of query methods shared by *sql.DB and *sql.Tx,
// letting the generated code run statements on either.
type driver interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}
|
|
|
|
var (
	// notAPointer is reported when a scan destination is not a pointer.
	notAPointer = errors.New("destination not a pointer")
	// lossyConversion is reported when a value cannot be converted to the
	// destination type without losing information.
	lossyConversion = errors.New("lossy conversion")
)
|
|
|
|
// DB wraps a *sql.DB together with the dialect-specific method set chosen
// at Open time (postgres or sqlite3).
type DB struct {
	*sql.DB
	dbMethods

	// Hooks holds overridable behaviors; Now supplies timestamps so tests
	// can control time.
	Hooks struct {
		Now func() time.Time
	}
}
|
|
|
|
// Open connects to the database identified by driver ("postgres" or
// "sqlite3") and source, verifies the connection with a ping, and returns
// a *DB bound to the matching dialect implementation.
func Open(driver, source string) (db *DB, err error) {
	var sql_db *sql.DB
	switch driver {
	case "postgres":
		sql_db, err = openpostgres(source)
	case "sqlite3":
		sql_db, err = opensqlite3(source)
	default:
		return nil, unsupportedDriver(driver)
	}
	if err != nil {
		return nil, makeErr(err)
	}
	// Close the handle on any later failure; err is the named result, so
	// this observes the error of whichever return fires below.
	defer func(sql_db *sql.DB) {
		if err != nil {
			sql_db.Close()
		}
	}(sql_db)

	if err := sql_db.Ping(); err != nil {
		return nil, makeErr(err)
	}

	db = &DB{
		DB: sql_db,
	}
	db.Hooks.Now = time.Now

	// Second dispatch on the (already validated) driver name; the default
	// arm is unreachable given the switch above but kept for symmetry.
	switch driver {
	case "postgres":
		db.dbMethods = newpostgres(db)
	case "sqlite3":
		db.dbMethods = newsqlite3(db)
	default:
		return nil, unsupportedDriver(driver)
	}

	return db, nil
}
|
|
|
|
func (obj *DB) Close() (err error) {
|
|
return obj.makeErr(obj.DB.Close())
|
|
}
|
|
|
|
// Open begins a new transaction and returns it wrapped with the dialect's
// transaction method set. ctx is currently unused by the generated code.
func (obj *DB) Open(ctx context.Context) (*Tx, error) {
	tx, err := obj.DB.Begin()
	if err != nil {
		return nil, obj.makeErr(err)
	}

	return &Tx{
		Tx:        tx,
		txMethods: obj.wrapTx(tx),
	}, nil
}
|
|
|
|
// NewRx returns a retrying-transaction helper bound to this DB.
func (obj *DB) NewRx() *Rx {
	return &Rx{db: obj}
}
|
|
|
|
func DeleteAll(ctx context.Context, db *DB) (int64, error) {
|
|
tx, err := db.Open(ctx)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
defer func() {
|
|
if err == nil {
|
|
err = db.makeErr(tx.Commit())
|
|
return
|
|
}
|
|
|
|
if err_rollback := tx.Rollback(); err_rollback != nil {
|
|
logError("delete-all: rollback failed: %v", db.makeErr(err_rollback))
|
|
}
|
|
}()
|
|
return tx.deleteAll(ctx)
|
|
}
|
|
|
|
// Tx pairs a *sql.Tx with the dialect-specific transaction method set.
type Tx struct {
	Tx *sql.Tx
	txMethods
}
|
|
|
|
// dialectTx supplies Commit/Rollback over a raw *sql.Tx for embedding in
// the per-dialect transaction types.
type dialectTx struct {
	tx *sql.Tx
}

// Commit commits the transaction, wrapping any error.
func (tx *dialectTx) Commit() (err error) {
	return makeErr(tx.tx.Commit())
}

// Rollback aborts the transaction, wrapping any error.
func (tx *dialectTx) Rollback() (err error) {
	return makeErr(tx.tx.Rollback())
}
|
|
|
|
// postgresImpl holds the postgres dialect state shared between DB-scoped
// and Tx-scoped method sets; driver is either the *sql.DB or a *sql.Tx.
type postgresImpl struct {
	db      *DB
	dialect __sqlbundle_postgres
	driver  driver
}

// Rebind rewrites ?-placeholders into the dialect's $N form.
func (obj *postgresImpl) Rebind(s string) string {
	return obj.dialect.Rebind(s)
}

// logStmt logs a statement and its arguments via the package Logger.
func (obj *postgresImpl) logStmt(stmt string, args ...interface{}) {
	postgresLogStmt(stmt, args...)
}

// makeErr wraps an error, detecting postgres constraint violations so they
// carry ErrorCode_ConstraintViolation and the constraint name.
func (obj *postgresImpl) makeErr(err error) error {
	constraint, ok := obj.isConstraintError(err)
	if ok {
		return constraintViolation(err, constraint)
	}
	return makeErr(err)
}
|
|
|
|
// postgresDB is the DB-scoped postgres method set.
type postgresDB struct {
	db *DB
	*postgresImpl
}

// newpostgres builds the postgres method set running statements directly
// against the database handle (no transaction).
func newpostgres(db *DB) *postgresDB {
	return &postgresDB{
		db: db,
		postgresImpl: &postgresImpl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
// Schema returns the full postgres DDL for this model: bwagreements,
// irreparabledbs, nodes, overlay_cache_nodes, raws, rollups, timestamps.
func (obj *postgresDB) Schema() string {
	return `CREATE TABLE bwagreements (
	signature bytea NOT NULL,
	data bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( signature )
);
CREATE TABLE irreparabledbs (
	segmentpath bytea NOT NULL,
	segmentdetail bytea NOT NULL,
	pieces_lost_count bigint NOT NULL,
	seg_damaged_unix_sec bigint NOT NULL,
	repair_attempt_count bigint NOT NULL,
	PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
	id bytea NOT NULL,
	audit_success_count bigint NOT NULL,
	total_audit_count bigint NOT NULL,
	audit_success_ratio double precision NOT NULL,
	uptime_success_count bigint NOT NULL,
	total_uptime_count bigint NOT NULL,
	uptime_ratio double precision NOT NULL,
	created_at timestamp with time zone NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE overlay_cache_nodes (
	key bytea NOT NULL,
	value bytea NOT NULL,
	PRIMARY KEY ( key ),
	UNIQUE ( key )
);
CREATE TABLE raws (
	id bigserial NOT NULL,
	node_id text NOT NULL,
	interval_end_time timestamp with time zone NOT NULL,
	data_total bigint NOT NULL,
	data_type integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE rollups (
	id bigserial NOT NULL,
	node_id text NOT NULL,
	start_time timestamp with time zone NOT NULL,
	interval bigint NOT NULL,
	data_type integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE timestamps (
	name text NOT NULL,
	value timestamp with time zone NOT NULL,
	PRIMARY KEY ( name )
);`
}
|
|
|
|
// wrapTx returns the postgres method set bound to a live transaction, so
// the generated statements execute inside tx.
func (obj *postgresDB) wrapTx(tx *sql.Tx) txMethods {
	return &postgresTx{
		dialectTx: dialectTx{tx: tx},
		postgresImpl: &postgresImpl{
			db:     obj.db,
			driver: tx,
		},
	}
}

// postgresTx combines Commit/Rollback with the postgres statement methods.
type postgresTx struct {
	dialectTx
	*postgresImpl
}
|
|
|
|
func postgresLogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
// sqlite3Impl holds the sqlite3 dialect state shared between DB-scoped and
// Tx-scoped method sets; driver is either the *sql.DB or a *sql.Tx.
type sqlite3Impl struct {
	db      *DB
	dialect __sqlbundle_sqlite3
	driver  driver
}

// Rebind is the identity for sqlite3 (it accepts ? placeholders natively).
func (obj *sqlite3Impl) Rebind(s string) string {
	return obj.dialect.Rebind(s)
}

// logStmt logs a statement and its arguments via the package Logger.
func (obj *sqlite3Impl) logStmt(stmt string, args ...interface{}) {
	sqlite3LogStmt(stmt, args...)
}

// makeErr wraps an error, detecting sqlite3 constraint violations so they
// carry ErrorCode_ConstraintViolation and the constraint name.
func (obj *sqlite3Impl) makeErr(err error) error {
	constraint, ok := obj.isConstraintError(err)
	if ok {
		return constraintViolation(err, constraint)
	}
	return makeErr(err)
}
|
|
|
|
// sqlite3DB is the DB-scoped sqlite3 method set.
type sqlite3DB struct {
	db *DB
	*sqlite3Impl
}

// newsqlite3 builds the sqlite3 method set running statements directly
// against the database handle (no transaction).
func newsqlite3(db *DB) *sqlite3DB {
	return &sqlite3DB{
		db: db,
		sqlite3Impl: &sqlite3Impl{
			db:     db,
			driver: db.DB,
		},
	}
}
|
|
|
|
// Schema returns the full sqlite3 DDL for this model, mirroring the
// postgres schema with sqlite storage classes (BLOB/INTEGER/REAL/TEXT).
func (obj *sqlite3DB) Schema() string {
	return `CREATE TABLE bwagreements (
	signature BLOB NOT NULL,
	data BLOB NOT NULL,
	created_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( signature )
);
CREATE TABLE irreparabledbs (
	segmentpath BLOB NOT NULL,
	segmentdetail BLOB NOT NULL,
	pieces_lost_count INTEGER NOT NULL,
	seg_damaged_unix_sec INTEGER NOT NULL,
	repair_attempt_count INTEGER NOT NULL,
	PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
	id BLOB NOT NULL,
	audit_success_count INTEGER NOT NULL,
	total_audit_count INTEGER NOT NULL,
	audit_success_ratio REAL NOT NULL,
	uptime_success_count INTEGER NOT NULL,
	total_uptime_count INTEGER NOT NULL,
	uptime_ratio REAL NOT NULL,
	created_at TIMESTAMP NOT NULL,
	updated_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE overlay_cache_nodes (
	key BLOB NOT NULL,
	value BLOB NOT NULL,
	PRIMARY KEY ( key ),
	UNIQUE ( key )
);
CREATE TABLE raws (
	id INTEGER NOT NULL,
	node_id TEXT NOT NULL,
	interval_end_time TIMESTAMP NOT NULL,
	data_total INTEGER NOT NULL,
	data_type INTEGER NOT NULL,
	created_at TIMESTAMP NOT NULL,
	updated_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE rollups (
	id INTEGER NOT NULL,
	node_id TEXT NOT NULL,
	start_time TIMESTAMP NOT NULL,
	interval INTEGER NOT NULL,
	data_type INTEGER NOT NULL,
	created_at TIMESTAMP NOT NULL,
	updated_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE timestamps (
	name TEXT NOT NULL,
	value TIMESTAMP NOT NULL,
	PRIMARY KEY ( name )
);`
}
|
|
|
|
// wrapTx returns the sqlite3 method set bound to a live transaction, so
// the generated statements execute inside tx.
func (obj *sqlite3DB) wrapTx(tx *sql.Tx) txMethods {
	return &sqlite3Tx{
		dialectTx: dialectTx{tx: tx},
		sqlite3Impl: &sqlite3Impl{
			db:     obj.db,
			driver: tx,
		},
	}
}

// sqlite3Tx combines Commit/Rollback with the sqlite3 statement methods.
type sqlite3Tx struct {
	dialectTx
	*sqlite3Impl
}
|
|
|
|
func sqlite3LogStmt(stmt string, args ...interface{}) {
|
|
// TODO: render placeholders
|
|
if Logger != nil {
|
|
out := fmt.Sprintf("stmt: %s\nargs: %v\n", stmt, pretty(args))
|
|
Logger(out)
|
|
}
|
|
}
|
|
|
|
// pretty formats a slice of SQL arguments for log output: strings quoted,
// times in RFC3339Nano, byte slices quoted when printable (hex otherwise),
// nil pointers as NULL.
type pretty []interface{}

// Format implements fmt.Formatter, rendering the arguments as a
// bracketed, comma-separated list.
func (p pretty) Format(f fmt.State, c rune) {
	fmt.Fprint(f, "[")
nextval:
	for i, val := range p {
		if i > 0 {
			fmt.Fprint(f, ", ")
		}
		// Dereference non-nil pointers so the pointee is formatted;
		// nil pointers render as NULL.
		rv := reflect.ValueOf(val)
		if rv.Kind() == reflect.Ptr {
			if rv.IsNil() {
				fmt.Fprint(f, "NULL")
				continue
			}
			val = rv.Elem().Interface()
		}
		switch v := val.(type) {
		case string:
			fmt.Fprintf(f, "%q", v)
		case time.Time:
			fmt.Fprintf(f, "%s", v.Format(time.RFC3339Nano))
		case []byte:
			// Any non-printable byte forces hex output for the whole slice.
			for _, b := range v {
				if !unicode.IsPrint(rune(b)) {
					fmt.Fprintf(f, "%#x", v)
					continue nextval
				}
			}
			fmt.Fprintf(f, "%q", v)
		default:
			fmt.Fprintf(f, "%v", v)
		}
	}
	fmt.Fprint(f, "]")
}
|
|
|
|
// Bwagreement mirrors a row of the bwagreements table.
//
// The *_Field types below follow the dbx optional-field pattern: _set marks
// an explicitly assigned value, _null marks SQL NULL, and value() yields
// the driver argument (nil when unset or null).
type Bwagreement struct {
	Signature []byte
	Data      []byte
	CreatedAt time.Time
}

// _Table reports the SQL table backing Bwagreement.
func (Bwagreement) _Table() string { return "bwagreements" }

// Bwagreement_Update_Fields is empty: no bwagreements column is updatable.
type Bwagreement_Update_Fields struct {
}

// Bwagreement_Signature_Field wraps the signature column.
type Bwagreement_Signature_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Bwagreement_Signature builds a set, non-null signature field.
func Bwagreement_Signature(v []byte) Bwagreement_Signature_Field {
	return Bwagreement_Signature_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field: nil when unset or null.
func (f Bwagreement_Signature_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Bwagreement_Signature_Field) _Column() string { return "signature" }

// Bwagreement_Data_Field wraps the data column.
type Bwagreement_Data_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// Bwagreement_Data builds a set, non-null data field.
func Bwagreement_Data(v []byte) Bwagreement_Data_Field {
	return Bwagreement_Data_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field: nil when unset or null.
func (f Bwagreement_Data_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Bwagreement_Data_Field) _Column() string { return "data" }

// Bwagreement_CreatedAt_Field wraps the created_at column.
type Bwagreement_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Bwagreement_CreatedAt builds a set, non-null created_at field.
func Bwagreement_CreatedAt(v time.Time) Bwagreement_CreatedAt_Field {
	return Bwagreement_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument for this field: nil when unset or null.
func (f Bwagreement_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Bwagreement_CreatedAt_Field) _Column() string { return "created_at" }
|
|
|
|
// Irreparabledb mirrors a row of the irreparabledbs table. The *_Field
// types follow the dbx optional-field pattern (see Bwagreement).
type Irreparabledb struct {
	Segmentpath        []byte
	Segmentdetail      []byte
	PiecesLostCount    int64
	SegDamagedUnixSec  int64
	RepairAttemptCount int64
}

// _Table reports the SQL table backing Irreparabledb.
func (Irreparabledb) _Table() string { return "irreparabledbs" }

// Irreparabledb_Update_Fields lists the updatable columns (all but the
// segmentpath primary key).
type Irreparabledb_Update_Fields struct {
	Segmentdetail      Irreparabledb_Segmentdetail_Field
	PiecesLostCount    Irreparabledb_PiecesLostCount_Field
	SegDamagedUnixSec  Irreparabledb_SegDamagedUnixSec_Field
	RepairAttemptCount Irreparabledb_RepairAttemptCount_Field
}

// Irreparabledb_Segmentpath_Field wraps the segmentpath column.
type Irreparabledb_Segmentpath_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Irreparabledb_Segmentpath(v []byte) Irreparabledb_Segmentpath_Field {
	return Irreparabledb_Segmentpath_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Irreparabledb_Segmentpath_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Irreparabledb_Segmentpath_Field) _Column() string { return "segmentpath" }

// Irreparabledb_Segmentdetail_Field wraps the segmentdetail column.
type Irreparabledb_Segmentdetail_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Irreparabledb_Segmentdetail(v []byte) Irreparabledb_Segmentdetail_Field {
	return Irreparabledb_Segmentdetail_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Irreparabledb_Segmentdetail_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Irreparabledb_Segmentdetail_Field) _Column() string { return "segmentdetail" }

// Irreparabledb_PiecesLostCount_Field wraps the pieces_lost_count column.
type Irreparabledb_PiecesLostCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Irreparabledb_PiecesLostCount(v int64) Irreparabledb_PiecesLostCount_Field {
	return Irreparabledb_PiecesLostCount_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Irreparabledb_PiecesLostCount_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Irreparabledb_PiecesLostCount_Field) _Column() string { return "pieces_lost_count" }

// Irreparabledb_SegDamagedUnixSec_Field wraps the seg_damaged_unix_sec column.
type Irreparabledb_SegDamagedUnixSec_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Irreparabledb_SegDamagedUnixSec(v int64) Irreparabledb_SegDamagedUnixSec_Field {
	return Irreparabledb_SegDamagedUnixSec_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Irreparabledb_SegDamagedUnixSec_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Irreparabledb_SegDamagedUnixSec_Field) _Column() string { return "seg_damaged_unix_sec" }

// Irreparabledb_RepairAttemptCount_Field wraps the repair_attempt_count column.
type Irreparabledb_RepairAttemptCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Irreparabledb_RepairAttemptCount(v int64) Irreparabledb_RepairAttemptCount_Field {
	return Irreparabledb_RepairAttemptCount_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Irreparabledb_RepairAttemptCount_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Irreparabledb_RepairAttemptCount_Field) _Column() string { return "repair_attempt_count" }
|
|
|
|
// Node mirrors a row of the nodes table (audit and uptime statistics per
// node id). The *_Field types follow the dbx optional-field pattern
// (see Bwagreement): _set marks assignment, _null marks SQL NULL, and
// value() yields the driver argument (nil when unset or null).
type Node struct {
	Id                 []byte
	AuditSuccessCount  int64
	TotalAuditCount    int64
	AuditSuccessRatio  float64
	UptimeSuccessCount int64
	TotalUptimeCount   int64
	UptimeRatio        float64
	CreatedAt          time.Time
	UpdatedAt          time.Time
}

// _Table reports the SQL table backing Node.
func (Node) _Table() string { return "nodes" }

// Node_Update_Fields lists the updatable columns (stats only; id and the
// timestamps are managed elsewhere).
type Node_Update_Fields struct {
	AuditSuccessCount  Node_AuditSuccessCount_Field
	TotalAuditCount    Node_TotalAuditCount_Field
	AuditSuccessRatio  Node_AuditSuccessRatio_Field
	UptimeSuccessCount Node_UptimeSuccessCount_Field
	TotalUptimeCount   Node_TotalUptimeCount_Field
	UptimeRatio        Node_UptimeRatio_Field
}

// Node_Id_Field wraps the id column.
type Node_Id_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func Node_Id(v []byte) Node_Id_Field {
	return Node_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_Id_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_Id_Field) _Column() string { return "id" }

// Node_AuditSuccessCount_Field wraps the audit_success_count column.
type Node_AuditSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Node_AuditSuccessCount(v int64) Node_AuditSuccessCount_Field {
	return Node_AuditSuccessCount_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_AuditSuccessCount_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_AuditSuccessCount_Field) _Column() string { return "audit_success_count" }

// Node_TotalAuditCount_Field wraps the total_audit_count column.
type Node_TotalAuditCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Node_TotalAuditCount(v int64) Node_TotalAuditCount_Field {
	return Node_TotalAuditCount_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_TotalAuditCount_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_TotalAuditCount_Field) _Column() string { return "total_audit_count" }

// Node_AuditSuccessRatio_Field wraps the audit_success_ratio column.
type Node_AuditSuccessRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func Node_AuditSuccessRatio(v float64) Node_AuditSuccessRatio_Field {
	return Node_AuditSuccessRatio_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_AuditSuccessRatio_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_AuditSuccessRatio_Field) _Column() string { return "audit_success_ratio" }

// Node_UptimeSuccessCount_Field wraps the uptime_success_count column.
type Node_UptimeSuccessCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Node_UptimeSuccessCount(v int64) Node_UptimeSuccessCount_Field {
	return Node_UptimeSuccessCount_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_UptimeSuccessCount_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_UptimeSuccessCount_Field) _Column() string { return "uptime_success_count" }

// Node_TotalUptimeCount_Field wraps the total_uptime_count column.
type Node_TotalUptimeCount_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Node_TotalUptimeCount(v int64) Node_TotalUptimeCount_Field {
	return Node_TotalUptimeCount_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_TotalUptimeCount_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_TotalUptimeCount_Field) _Column() string { return "total_uptime_count" }

// Node_UptimeRatio_Field wraps the uptime_ratio column.
type Node_UptimeRatio_Field struct {
	_set   bool
	_null  bool
	_value float64
}

func Node_UptimeRatio(v float64) Node_UptimeRatio_Field {
	return Node_UptimeRatio_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_UptimeRatio_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_UptimeRatio_Field) _Column() string { return "uptime_ratio" }

// Node_CreatedAt_Field wraps the created_at column.
type Node_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Node_CreatedAt(v time.Time) Node_CreatedAt_Field {
	return Node_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_CreatedAt_Field) _Column() string { return "created_at" }

// Node_UpdatedAt_Field wraps the updated_at column.
type Node_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Node_UpdatedAt(v time.Time) Node_UpdatedAt_Field {
	return Node_UpdatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Node_UpdatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Node_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// OverlayCacheNode mirrors a row of the overlay_cache_nodes key/value table.
type OverlayCacheNode struct {
	Key   []byte
	Value []byte
}

// _Table reports the SQL table backing OverlayCacheNode.
func (OverlayCacheNode) _Table() string { return "overlay_cache_nodes" }

// OverlayCacheNode_Update_Fields lists the updatable columns (value only;
// key is the primary key).
type OverlayCacheNode_Update_Fields struct {
	Value OverlayCacheNode_Value_Field
}

// OverlayCacheNode_Key_Field is the optional/nullable wrapper for the key
// column: _set marks assignment, _null marks SQL NULL.
type OverlayCacheNode_Key_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// OverlayCacheNode_Key builds a set, non-null key field.
func OverlayCacheNode_Key(v []byte) OverlayCacheNode_Key_Field {
	field := OverlayCacheNode_Key_Field{}
	field._set = true
	field._value = v
	return field
}

// value yields the SQL driver argument: the wrapped bytes when set and
// non-null, nil otherwise.
func (f OverlayCacheNode_Key_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_Key_Field) _Column() string { return "key" }

// OverlayCacheNode_Value_Field is the optional/nullable wrapper for the
// value column.
type OverlayCacheNode_Value_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

// OverlayCacheNode_Value builds a set, non-null value field.
func OverlayCacheNode_Value(v []byte) OverlayCacheNode_Value_Field {
	field := OverlayCacheNode_Value_Field{}
	field._set = true
	field._value = v
	return field
}

// value yields the SQL driver argument: the wrapped bytes when set and
// non-null, nil otherwise.
func (f OverlayCacheNode_Value_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (OverlayCacheNode_Value_Field) _Column() string { return "value" }
|
|
|
|
// Raw mirrors a row of the raws table (raw accounting entries per node).
// The *_Field types follow the dbx optional-field pattern (see Bwagreement).
type Raw struct {
	Id              int64
	NodeId          string
	IntervalEndTime time.Time
	DataTotal       int64
	DataType        int
	CreatedAt       time.Time
	UpdatedAt       time.Time
}

// _Table reports the SQL table backing Raw.
func (Raw) _Table() string { return "raws" }

// Raw_Update_Fields is empty: no raws column is updatable.
type Raw_Update_Fields struct {
}

// Raw_Id_Field wraps the id column.
type Raw_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Raw_Id(v int64) Raw_Id_Field {
	return Raw_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_Id_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_Id_Field) _Column() string { return "id" }

// Raw_NodeId_Field wraps the node_id column.
type Raw_NodeId_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Raw_NodeId(v string) Raw_NodeId_Field {
	return Raw_NodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_NodeId_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_NodeId_Field) _Column() string { return "node_id" }

// Raw_IntervalEndTime_Field wraps the interval_end_time column.
type Raw_IntervalEndTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Raw_IntervalEndTime(v time.Time) Raw_IntervalEndTime_Field {
	return Raw_IntervalEndTime_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_IntervalEndTime_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_IntervalEndTime_Field) _Column() string { return "interval_end_time" }

// Raw_DataTotal_Field wraps the data_total column.
type Raw_DataTotal_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Raw_DataTotal(v int64) Raw_DataTotal_Field {
	return Raw_DataTotal_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_DataTotal_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_DataTotal_Field) _Column() string { return "data_total" }

// Raw_DataType_Field wraps the data_type column.
type Raw_DataType_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Raw_DataType(v int) Raw_DataType_Field {
	return Raw_DataType_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_DataType_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_DataType_Field) _Column() string { return "data_type" }

// Raw_CreatedAt_Field wraps the created_at column.
type Raw_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Raw_CreatedAt(v time.Time) Raw_CreatedAt_Field {
	return Raw_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_CreatedAt_Field) _Column() string { return "created_at" }

// Raw_UpdatedAt_Field wraps the updated_at column.
type Raw_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Raw_UpdatedAt(v time.Time) Raw_UpdatedAt_Field {
	return Raw_UpdatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Raw_UpdatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Raw_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// Rollup mirrors a row of the rollups table (aggregated accounting per
// node and interval). The *_Field types follow the dbx optional-field
// pattern (see Bwagreement).
type Rollup struct {
	Id        int64
	NodeId    string
	StartTime time.Time
	Interval  int64
	DataType  int
	CreatedAt time.Time
	UpdatedAt time.Time
}

// _Table reports the SQL table backing Rollup.
func (Rollup) _Table() string { return "rollups" }

// Rollup_Update_Fields is empty: no rollups column is updatable.
type Rollup_Update_Fields struct {
}

// Rollup_Id_Field wraps the id column.
type Rollup_Id_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Rollup_Id(v int64) Rollup_Id_Field {
	return Rollup_Id_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_Id_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_Id_Field) _Column() string { return "id" }

// Rollup_NodeId_Field wraps the node_id column.
type Rollup_NodeId_Field struct {
	_set   bool
	_null  bool
	_value string
}

func Rollup_NodeId(v string) Rollup_NodeId_Field {
	return Rollup_NodeId_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_NodeId_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_NodeId_Field) _Column() string { return "node_id" }

// Rollup_StartTime_Field wraps the start_time column.
type Rollup_StartTime_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Rollup_StartTime(v time.Time) Rollup_StartTime_Field {
	return Rollup_StartTime_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_StartTime_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_StartTime_Field) _Column() string { return "start_time" }

// Rollup_Interval_Field wraps the interval column.
type Rollup_Interval_Field struct {
	_set   bool
	_null  bool
	_value int64
}

func Rollup_Interval(v int64) Rollup_Interval_Field {
	return Rollup_Interval_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_Interval_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_Interval_Field) _Column() string { return "interval" }

// Rollup_DataType_Field wraps the data_type column.
type Rollup_DataType_Field struct {
	_set   bool
	_null  bool
	_value int
}

func Rollup_DataType(v int) Rollup_DataType_Field {
	return Rollup_DataType_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_DataType_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_DataType_Field) _Column() string { return "data_type" }

// Rollup_CreatedAt_Field wraps the created_at column.
type Rollup_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Rollup_CreatedAt(v time.Time) Rollup_CreatedAt_Field {
	return Rollup_CreatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_CreatedAt_Field) _Column() string { return "created_at" }

// Rollup_UpdatedAt_Field wraps the updated_at column.
type Rollup_UpdatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func Rollup_UpdatedAt(v time.Time) Rollup_UpdatedAt_Field {
	return Rollup_UpdatedAt_Field{_set: true, _value: v}
}

// value returns the SQL argument: nil when unset or null.
func (f Rollup_UpdatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (Rollup_UpdatedAt_Field) _Column() string { return "updated_at" }
|
|
|
|
// Timestamps mirrors a row of the timestamps table (named checkpoints).
type Timestamps struct {
	Name  string
	Value time.Time
}

// _Table reports the SQL table backing Timestamps.
func (Timestamps) _Table() string { return "timestamps" }

// Timestamps_Update_Fields lists the updatable columns (value only; name
// is the primary key).
type Timestamps_Update_Fields struct {
	Value Timestamps_Value_Field
}

// Timestamps_Name_Field is the optional/nullable wrapper for the name
// column: _set marks assignment, _null marks SQL NULL.
type Timestamps_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

// Timestamps_Name builds a set, non-null name field.
func Timestamps_Name(v string) Timestamps_Name_Field {
	field := Timestamps_Name_Field{}
	field._set = true
	field._value = v
	return field
}

// value yields the SQL driver argument: the wrapped string when set and
// non-null, nil otherwise.
func (f Timestamps_Name_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Timestamps_Name_Field) _Column() string { return "name" }

// Timestamps_Value_Field is the optional/nullable wrapper for the value
// column.
type Timestamps_Value_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

// Timestamps_Value builds a set, non-null value field.
func Timestamps_Value(v time.Time) Timestamps_Value_Field {
	field := Timestamps_Value_Field{}
	field._set = true
	field._value = v
	return field
}

// value yields the SQL driver argument: the wrapped time when set and
// non-null, nil otherwise.
func (f Timestamps_Value_Field) value() interface{} {
	if f._set && !f._null {
		return f._value
	}
	return nil
}

func (Timestamps_Value_Field) _Column() string { return "value" }
|
|
|
|
func toUTC(t time.Time) time.Time {
|
|
return t.UTC()
|
|
}
|
|
|
|
func toDate(t time.Time) time.Time {
|
|
// keep up the minute portion so that translations between timezones will
|
|
// continue to reflect properly.
|
|
return t.Truncate(time.Minute)
|
|
}
|
|
|
|
//
// runtime support for building sql statements
//

// __sqlbundle_SQL is a renderable SQL fragment; private() restricts
// implementations to this file's types.
type __sqlbundle_SQL interface {
	Render() string

	private()
}

// __sqlbundle_Dialect rewrites placeholder syntax for a target database.
type __sqlbundle_Dialect interface {
	Rebind(sql string) string
}
|
|
|
|
// __sqlbundle_RenderOp is an option flag for __sqlbundle_Render.
type __sqlbundle_RenderOp int

const (
	// __sqlbundle_NoFlatten keeps the statement's original whitespace.
	__sqlbundle_NoFlatten __sqlbundle_RenderOp = iota
	// __sqlbundle_NoTerminate omits the trailing semicolon.
	__sqlbundle_NoTerminate
)
|
|
|
|
func __sqlbundle_Render(dialect __sqlbundle_Dialect, sql __sqlbundle_SQL, ops ...__sqlbundle_RenderOp) string {
|
|
out := sql.Render()
|
|
|
|
flatten := true
|
|
terminate := true
|
|
for _, op := range ops {
|
|
switch op {
|
|
case __sqlbundle_NoFlatten:
|
|
flatten = false
|
|
case __sqlbundle_NoTerminate:
|
|
terminate = false
|
|
}
|
|
}
|
|
|
|
if flatten {
|
|
out = __sqlbundle_flattenSQL(out)
|
|
}
|
|
if terminate {
|
|
out += ";"
|
|
}
|
|
|
|
return dialect.Rebind(out)
|
|
}
|
|
|
|
// __sqlbundle_reSpace matches any run of whitespace; compiled once at
// package scope.
var __sqlbundle_reSpace = regexp.MustCompile(`\s+`)

// __sqlbundle_flattenSQL collapses every whitespace run to a single space
// and strips leading/trailing whitespace, turning a multi-line SQL literal
// into one compact line.
func __sqlbundle_flattenSQL(s string) string {
	collapsed := __sqlbundle_reSpace.ReplaceAllString(s, " ")
	return strings.TrimSpace(collapsed)
}
|
|
|
|
// this type is specially named to match up with the name returned by the
// dialect impl in the sql package.
type __sqlbundle_postgres struct{}

// Rebind rewrites each '?' placeholder into postgres-style $1, $2, …
// numbered in order of appearance; all other bytes pass through verbatim.
func (p __sqlbundle_postgres) Rebind(sql string) string {
	out := make([]byte, 0, len(sql)+10)

	param := 1
	for _, ch := range []byte(sql) {
		if ch != '?' {
			out = append(out, ch)
			continue
		}
		out = append(out, '$')
		out = strconv.AppendInt(out, int64(param), 10)
		param++
	}

	return string(out)
}
|
|
|
|
// this type is specially named to match up with the name returned by the
|
|
// dialect impl in the sql package.
|
|
type __sqlbundle_sqlite3 struct{}

// Rebind is a no-op for sqlite3, which natively accepts ? placeholders.
func (__sqlbundle_sqlite3) Rebind(sql string) string {
	return sql
}
|
|
|
|
// __sqlbundle_Literal is a fixed SQL string fragment.
type __sqlbundle_Literal string

func (__sqlbundle_Literal) private() {}

// Render returns the literal SQL text unchanged.
func (l __sqlbundle_Literal) Render() string { return string(l) }
|
|
|
|
type __sqlbundle_Literals struct {
|
|
Join string
|
|
SQLs []__sqlbundle_SQL
|
|
}
|
|
|
|
func (__sqlbundle_Literals) private() {}
|
|
|
|
func (l __sqlbundle_Literals) Render() string {
|
|
var out bytes.Buffer
|
|
|
|
first := true
|
|
for _, sql := range l.SQLs {
|
|
if sql == nil {
|
|
continue
|
|
}
|
|
if !first {
|
|
out.WriteString(l.Join)
|
|
}
|
|
first = false
|
|
out.WriteString(sql.Render())
|
|
}
|
|
|
|
return out.String()
|
|
}
|
|
|
|
// __sqlbundle_Condition is a comparison between a column and a value whose
// null-ness is only known at runtime.
type __sqlbundle_Condition struct {
	// set at compile/embed time
	Name  string
	Left  string
	Equal bool
	Right string

	// set at runtime
	Null bool
}

func (*__sqlbundle_Condition) private() {}

// Render produces the SQL comparison, switching to IS NULL / IS NOT NULL
// forms when the right-hand side is null at runtime.
func (c *__sqlbundle_Condition) Render() string {
	if c.Null {
		if c.Equal {
			return c.Left + " is null"
		}
		return c.Left + " is not null"
	}
	if c.Equal {
		return c.Left + " = " + c.Right
	}
	return c.Left + " != " + c.Right
}
|
|
|
|
// __sqlbundle_Hole is a placeholder inside an embedded statement that is
// filled with a concrete SQL fragment at runtime.
type __sqlbundle_Hole struct {
	// set at compile/embed time
	Name string

	// set at runtime
	SQL __sqlbundle_SQL
}

func (*__sqlbundle_Hole) private() {}

// Render renders the fragment currently placed in the hole; SQL must be
// assigned before rendering or this panics on the nil interface.
func (h *__sqlbundle_Hole) Render() string { return h.SQL.Render() }
|
|
|
|
//
|
|
// end runtime support for building sql statements
|
|
//
|
|
|
|
// Value_Row holds the single timestamps.value column returned by partial
// selects such as Find_Timestamps_Value_By_Name.
type Value_Row struct {
	Value time.Time
}
|
|
|
|
// Create_Bwagreement inserts a bwagreements row with the given signature
// and data, stamping created_at from the db clock hook, and returns the
// row as stored by the database.
func (obj *postgresImpl) Create_Bwagreement(ctx context.Context,
	bwagreement_signature Bwagreement_Signature_Field,
	bwagreement_data Bwagreement_Data_Field) (
	bwagreement *Bwagreement, err error) {

	// created_at comes from the Hooks.Now clock (overridable in tests), in UTC.
	__now := obj.db.Hooks.Now().UTC()
	__signature_val := bwagreement_signature.value()
	__data_val := bwagreement_data.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bwagreements ( signature, data, created_at ) VALUES ( ?, ?, ? ) RETURNING bwagreements.signature, bwagreements.data, bwagreements.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __signature_val, __data_val, __created_at_val)

	// RETURNING yields the row exactly as stored, so database-side
	// conversions are reflected in the returned struct.
	bwagreement = &Bwagreement{}
	err = obj.driver.QueryRow(__stmt, __signature_val, __data_val, __created_at_val).Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bwagreement, nil

}
|
|
|
|
// Create_Irreparabledb inserts an irreparabledbs row with the given column
// values and returns the row as stored.
func (obj *postgresImpl) Create_Irreparabledb(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
	irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
	irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
	irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
	irreparabledb *Irreparabledb, err error) {
	__segmentpath_val := irreparabledb_segmentpath.value()
	__segmentdetail_val := irreparabledb_segmentdetail.value()
	__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
	__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
	__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? ) RETURNING irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)

	// RETURNING yields the row exactly as stored.
	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// Create_Timestamps inserts a timestamps row keyed by name and returns the
// row as stored.
func (obj *postgresImpl) Create_Timestamps(ctx context.Context,
	timestamps_name Timestamps_Name_Field,
	timestamps_value Timestamps_Value_Field) (
	timestamps *Timestamps, err error) {
	__name_val := timestamps_name.value()
	__value_val := timestamps_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO timestamps ( name, value ) VALUES ( ?, ? ) RETURNING timestamps.name, timestamps.value")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __name_val, __value_val)

	timestamps = &Timestamps{}
	err = obj.driver.QueryRow(__stmt, __name_val, __value_val).Scan(&timestamps.Name, &timestamps.Value)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return timestamps, nil

}
|
|
|
|
// Create_Rollup inserts a rollups row, stamping created_at and updated_at
// with the same clock reading, and returns the row (including the
// database-generated id).
func (obj *postgresImpl) Create_Rollup(ctx context.Context,
	rollup_node_id Rollup_NodeId_Field,
	rollup_start_time Rollup_StartTime_Field,
	rollup_interval Rollup_Interval_Field,
	rollup_data_type Rollup_DataType_Field) (
	rollup *Rollup, err error) {

	// both timestamps use one Hooks.Now reading so created_at == updated_at
	// on a fresh row.
	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := rollup_node_id.value()
	__start_time_val := rollup_start_time.value()
	__interval_val := rollup_interval.value()
	__data_type_val := rollup_data_type.value()
	__created_at_val := __now
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO rollups ( node_id, start_time, interval, data_type, created_at, updated_at ) VALUES ( ?, ?, ?, ?, ?, ? ) RETURNING rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __start_time_val, __interval_val, __data_type_val, __created_at_val, __updated_at_val)

	rollup = &Rollup{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __start_time_val, __interval_val, __data_type_val, __created_at_val, __updated_at_val).Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return rollup, nil

}
|
|
|
|
// Create_Raw inserts a raws row, stamping created_at and updated_at with
// the same clock reading, and returns the row (including the
// database-generated id).
func (obj *postgresImpl) Create_Raw(ctx context.Context,
	raw_node_id Raw_NodeId_Field,
	raw_interval_end_time Raw_IntervalEndTime_Field,
	raw_data_total Raw_DataTotal_Field,
	raw_data_type Raw_DataType_Field) (
	raw *Raw, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__node_id_val := raw_node_id.value()
	__interval_end_time_val := raw_interval_end_time.value()
	__data_total_val := raw_data_total.value()
	__data_type_val := raw_data_type.value()
	__created_at_val := __now
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO raws ( node_id, interval_end_time, data_total, data_type, created_at, updated_at ) VALUES ( ?, ?, ?, ?, ?, ? ) RETURNING raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val, __updated_at_val)

	raw = &Raw{}
	err = obj.driver.QueryRow(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val, __updated_at_val).Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return raw, nil

}
|
|
|
|
// Create_Node inserts a nodes row carrying the audit/uptime reputation
// counters, stamping created_at and updated_at from one clock reading, and
// returns the row as stored.
func (obj *postgresImpl) Create_Node(ctx context.Context,
	node_id Node_Id_Field,
	node_audit_success_count Node_AuditSuccessCount_Field,
	node_total_audit_count Node_TotalAuditCount_Field,
	node_audit_success_ratio Node_AuditSuccessRatio_Field,
	node_uptime_success_count Node_UptimeSuccessCount_Field,
	node_total_uptime_count Node_TotalUptimeCount_Field,
	node_uptime_ratio Node_UptimeRatio_Field) (
	node *Node, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__id_val := node_id.value()
	__audit_success_count_val := node_audit_success_count.value()
	__total_audit_count_val := node_total_audit_count.value()
	__audit_success_ratio_val := node_audit_success_ratio.value()
	__uptime_success_count_val := node_uptime_success_count.value()
	__total_uptime_count_val := node_total_uptime_count.value()
	__uptime_ratio_val := node_uptime_ratio.value()
	__created_at_val := __now
	__updated_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// Create_OverlayCacheNode inserts an overlay cache key/value row and
// returns the row as stored.
func (obj *postgresImpl) Create_OverlayCacheNode(ctx context.Context,
	overlay_cache_node_key OverlayCacheNode_Key_Field,
	overlay_cache_node_value OverlayCacheNode_Value_Field) (
	overlay_cache_node *OverlayCacheNode, err error) {
	__key_val := overlay_cache_node_key.value()
	__value_val := overlay_cache_node_value.value()

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO overlay_cache_nodes ( key, value ) VALUES ( ?, ? ) RETURNING overlay_cache_nodes.key, overlay_cache_nodes.value")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __key_val, __value_val)

	overlay_cache_node = &OverlayCacheNode{}
	err = obj.driver.QueryRow(__stmt, __key_val, __value_val).Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return overlay_cache_node, nil

}
|
|
|
|
// Get_Bwagreement_By_Signature fetches the single bwagreements row with
// the given signature; a missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Bwagreement_By_Signature(ctx context.Context,
	bwagreement_signature Bwagreement_Signature_Field) (
	bwagreement *Bwagreement, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements WHERE bwagreements.signature = ?")

	var __values []interface{}
	__values = append(__values, bwagreement_signature.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bwagreement = &Bwagreement{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bwagreement, nil

}
|
|
|
|
func (obj *postgresImpl) Limited_Bwagreement(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bwagreement := &Bwagreement{}
|
|
err = __rows.Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bwagreement)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) All_Bwagreement(ctx context.Context) (
|
|
rows []*Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bwagreement := &Bwagreement{}
|
|
err = __rows.Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bwagreement)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
// All_Bwagreement_By_CreatedAt_Greater returns every bwagreements row
// created strictly after the given timestamp.
func (obj *postgresImpl) All_Bwagreement_By_CreatedAt_Greater(ctx context.Context,
	bwagreement_created_at_greater Bwagreement_CreatedAt_Field) (
	rows []*Bwagreement, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements WHERE bwagreements.created_at > ?")

	var __values []interface{}
	__values = append(__values, bwagreement_created_at_greater.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bwagreement := &Bwagreement{}
		err = __rows.Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bwagreement)
	}
	// Err reports iteration errors that Next swallows.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Irreparabledb_By_Segmentpath fetches the single irreparabledbs row
// keyed by segmentpath; a missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")

	var __values []interface{}
	__values = append(__values, irreparabledb_segmentpath.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil

}
|
|
|
|
// Find_Timestamps_Value_By_Name fetches only the value column of the
// timestamps row with the given name. Unlike the Get_ methods, a missing
// row is not an error: it returns (nil, nil).
func (obj *postgresImpl) Find_Timestamps_Value_By_Name(ctx context.Context,
	timestamps_name Timestamps_Name_Field) (
	row *Value_Row, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT timestamps.value FROM timestamps WHERE timestamps.name = ?")

	var __values []interface{}
	__values = append(__values, timestamps_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	row = &Value_Row{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value)
	// "find" semantics: absence of a row is a nil result, not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return row, nil

}
|
|
|
|
// Get_Rollup_By_Id fetches the single rollups row with the given id; a
// missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Rollup_By_Id(ctx context.Context,
	rollup_id Rollup_Id_Field) (
	rollup *Rollup, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at FROM rollups WHERE rollups.id = ?")

	var __values []interface{}
	__values = append(__values, rollup_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	rollup = &Rollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return rollup, nil

}
|
|
|
|
// All_Rollup_By_NodeId returns every rollups row for the given node id.
func (obj *postgresImpl) All_Rollup_By_NodeId(ctx context.Context,
	rollup_node_id Rollup_NodeId_Field) (
	rows []*Rollup, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at FROM rollups WHERE rollups.node_id = ?")

	var __values []interface{}
	__values = append(__values, rollup_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		rollup := &Rollup{}
		err = __rows.Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, rollup)
	}
	// Err reports iteration errors that Next swallows.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Raw_By_Id fetches the single raws row with the given id; a missing
// row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Raw_By_Id(ctx context.Context,
	raw_id Raw_Id_Field) (
	raw *Raw, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at FROM raws WHERE raws.id = ?")

	var __values []interface{}
	__values = append(__values, raw_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	raw = &Raw{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return raw, nil

}
|
|
|
|
// All_Raw_By_NodeId returns every raws row for the given node id.
func (obj *postgresImpl) All_Raw_By_NodeId(ctx context.Context,
	raw_node_id Raw_NodeId_Field) (
	rows []*Raw, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at FROM raws WHERE raws.node_id = ?")

	var __values []interface{}
	__values = append(__values, raw_node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		raw := &Raw{}
		err = __rows.Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, raw)
	}
	// Err reports iteration errors that Next swallows.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Get_Node_By_Id fetches the single nodes row with the given id; a missing
// row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at FROM nodes WHERE nodes.id = ?")

	var __values []interface{}
	__values = append(__values, node_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil

}
|
|
|
|
// Get_OverlayCacheNode_By_Key fetches the single overlay cache row with
// the given key; a missing row surfaces as a wrapped sql.ErrNoRows.
func (obj *postgresImpl) Get_OverlayCacheNode_By_Key(ctx context.Context,
	overlay_cache_node_key OverlayCacheNode_Key_Field) (
	overlay_cache_node *OverlayCacheNode, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.key, overlay_cache_nodes.value FROM overlay_cache_nodes WHERE overlay_cache_nodes.key = ?")

	var __values []interface{}
	__values = append(__values, overlay_cache_node_key.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	overlay_cache_node = &OverlayCacheNode{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return overlay_cache_node, nil

}
|
|
|
|
// Limited_OverlayCacheNode_By_Key_GreaterOrEqual returns up to limit
// overlay cache rows with key >= the given key, starting at offset. Used
// for paginated iteration over the cache.
func (obj *postgresImpl) Limited_OverlayCacheNode_By_Key_GreaterOrEqual(ctx context.Context,
	overlay_cache_node_key_greater_or_equal OverlayCacheNode_Key_Field,
	limit int, offset int64) (
	rows []*OverlayCacheNode, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.key, overlay_cache_nodes.value FROM overlay_cache_nodes WHERE overlay_cache_nodes.key >= ? LIMIT ? OFFSET ?")

	var __values []interface{}
	__values = append(__values, overlay_cache_node_key_greater_or_equal.value())

	__values = append(__values, limit, offset)

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		overlay_cache_node := &OverlayCacheNode{}
		err = __rows.Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, overlay_cache_node)
	}
	// Err reports iteration errors that Next swallows.
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}
|
|
|
|
// Update_Irreparabledb_By_Segmentpath applies the set fields in update to
// the irreparabledbs row keyed by segmentpath and returns the updated row.
// Returns (nil, emptyUpdate()) when no fields are set, and (nil, nil) when
// no row matches.
func (obj *postgresImpl) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	update Irreparabledb_Update_Fields) (
	irreparabledb *Irreparabledb, err error) {
	// __sets is a hole in the embedded statement filled with the SET list
	// built below at runtime.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ? RETURNING irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// only fields explicitly set on update participate in the SET clause.
	if update.Segmentdetail._set {
		__values = append(__values, update.Segmentdetail.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
	}

	if update.PiecesLostCount._set {
		__values = append(__values, update.PiecesLostCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
	}

	if update.SegDamagedUnixSec._set {
		__values = append(__values, update.SegDamagedUnixSec.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
	}

	if update.RepairAttemptCount._set {
		__values = append(__values, update.RepairAttemptCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE clause arguments come after all SET values.
	__args = append(__args, irreparabledb_segmentpath.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	irreparabledb = &Irreparabledb{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
	// no matching row is reported as (nil, nil), not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return irreparabledb, nil
}
|
|
|
|
// Update_Timestamps_By_Name applies the set fields in update to the
// timestamps row keyed by name and returns the updated row. Returns
// (nil, emptyUpdate()) when no fields are set, and (nil, nil) when no row
// matches.
func (obj *postgresImpl) Update_Timestamps_By_Name(ctx context.Context,
	timestamps_name Timestamps_Name_Field,
	update Timestamps_Update_Fields) (
	timestamps *Timestamps, err error) {
	// __sets is a hole in the embedded statement filled with the SET list
	// built below at runtime.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE timestamps SET "), __sets, __sqlbundle_Literal(" WHERE timestamps.name = ? RETURNING timestamps.name, timestamps.value")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	if update.Value._set {
		__values = append(__values, update.Value.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
	}

	if len(__sets_sql.SQLs) == 0 {
		return nil, emptyUpdate()
	}

	// WHERE clause arguments come after all SET values.
	__args = append(__args, timestamps_name.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	timestamps = &Timestamps{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&timestamps.Name, &timestamps.Value)
	// no matching row is reported as (nil, nil), not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return timestamps, nil
}
|
|
|
|
// Update_Rollup_By_Id updates the rollups row with the given id and
// returns the updated row, or (nil, nil) when no row matches. updated_at
// is always refreshed from the db clock hook, so the update is never
// empty even though the Rollup_Update_Fields struct carries no settable
// columns.
func (obj *postgresImpl) Update_Rollup_By_Id(ctx context.Context,
	rollup_id Rollup_Id_Field,
	update Rollup_Update_Fields) (
	rollup *Rollup, err error) {
	// __sets is a hole in the embedded statement filled with the SET list
	// built below at runtime.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE rollups SET "), __sets, __sqlbundle_Literal(" WHERE rollups.id = ? RETURNING rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	__now := obj.db.Hooks.Now().UTC()

	// updated_at is maintained automatically on every update.
	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, rollup_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	rollup = &Rollup{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
	// no matching row is reported as (nil, nil), not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return rollup, nil
}
|
|
|
|
// Update_Raw_By_Id updates the raws row with the given id and returns the
// updated row, or (nil, nil) when no row matches. updated_at is always
// refreshed from the db clock hook, so the update is never empty even
// though the Raw_Update_Fields struct carries no settable columns.
func (obj *postgresImpl) Update_Raw_By_Id(ctx context.Context,
	raw_id Raw_Id_Field,
	update Raw_Update_Fields) (
	raw *Raw, err error) {
	// __sets is a hole in the embedded statement filled with the SET list
	// built below at runtime.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE raws SET "), __sets, __sqlbundle_Literal(" WHERE raws.id = ? RETURNING raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	__now := obj.db.Hooks.Now().UTC()

	// updated_at is maintained automatically on every update.
	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, raw_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	raw = &Raw{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
	// no matching row is reported as (nil, nil), not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return raw, nil
}
|
|
|
|
// Update_Node_By_Id applies the set reputation fields in update to the
// nodes row with the given id and returns the updated row, or (nil, nil)
// when no row matches. updated_at is always refreshed from the db clock
// hook, so the update is never empty even when no fields are set.
func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	node *Node, err error) {
	// __sets is a hole in the embedded statement filled with the SET list
	// built below at runtime.
	var __sets = &__sqlbundle_Hole{}

	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at")}}

	__sets_sql := __sqlbundle_Literals{Join: ", "}
	var __values []interface{}
	var __args []interface{}

	// only fields explicitly set on update participate in the SET clause.
	if update.AuditSuccessCount._set {
		__values = append(__values, update.AuditSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
	}

	if update.TotalAuditCount._set {
		__values = append(__values, update.TotalAuditCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
	}

	if update.AuditSuccessRatio._set {
		__values = append(__values, update.AuditSuccessRatio.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_ratio = ?"))
	}

	if update.UptimeSuccessCount._set {
		__values = append(__values, update.UptimeSuccessCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
	}

	if update.TotalUptimeCount._set {
		__values = append(__values, update.TotalUptimeCount.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
	}

	if update.UptimeRatio._set {
		__values = append(__values, update.UptimeRatio.value())
		__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_ratio = ?"))
	}

	__now := obj.db.Hooks.Now().UTC()

	// updated_at is maintained automatically on every update.
	__values = append(__values, __now)
	__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))

	__args = append(__args, node_id.value())

	__values = append(__values, __args...)
	__sets.SQL = __sets_sql

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	node = &Node{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt)
	// no matching row is reported as (nil, nil), not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return node, nil
}
|
|
|
|
func (obj *postgresImpl) Update_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE overlay_cache_nodes SET "), __sets, __sqlbundle_Literal(" WHERE overlay_cache_nodes.key = ? RETURNING overlay_cache_nodes.key, overlay_cache_nodes.value")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, overlay_cache_node_key.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Bwagreement_By_Signature(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM bwagreements WHERE bwagreements.signature = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bwagreement_signature.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM rollups WHERE rollups.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, rollup_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM raws WHERE raws.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, raw_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *postgresImpl) Delete_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM overlay_cache_nodes WHERE overlay_cache_nodes.key = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_key.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (impl postgresImpl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(*pq.Error); ok {
|
|
if e.Code.Class() == "23" {
|
|
return e.Constraint, true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.Exec("DELETE FROM timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM raws;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM overlay_cache_nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bwagreements;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_Bwagreement(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field,
|
|
bwagreement_data Bwagreement_Data_Field) (
|
|
bwagreement *Bwagreement, err error) {
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__signature_val := bwagreement_signature.value()
|
|
__data_val := bwagreement_data.value()
|
|
__created_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO bwagreements ( signature, data, created_at ) VALUES ( ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __signature_val, __data_val, __created_at_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __signature_val, __data_val, __created_at_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastBwagreement(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
__segmentpath_val := irreparabledb_segmentpath.value()
|
|
__segmentdetail_val := irreparabledb_segmentdetail.value()
|
|
__pieces_lost_count_val := irreparabledb_pieces_lost_count.value()
|
|
__seg_damaged_unix_sec_val := irreparabledb_seg_damaged_unix_sec.value()
|
|
__repair_attempt_count_val := irreparabledb_repair_attempt_count.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO irreparabledbs ( segmentpath, segmentdetail, pieces_lost_count, seg_damaged_unix_sec, repair_attempt_count ) VALUES ( ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __segmentpath_val, __segmentdetail_val, __pieces_lost_count_val, __seg_damaged_unix_sec_val, __repair_attempt_count_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastIrreparabledb(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_Timestamps(ctx context.Context,
|
|
timestamps_name Timestamps_Name_Field,
|
|
timestamps_value Timestamps_Value_Field) (
|
|
timestamps *Timestamps, err error) {
|
|
__name_val := timestamps_name.value()
|
|
__value_val := timestamps_value.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO timestamps ( name, value ) VALUES ( ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __name_val, __value_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __name_val, __value_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastTimestamps(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_Rollup(ctx context.Context,
|
|
rollup_node_id Rollup_NodeId_Field,
|
|
rollup_start_time Rollup_StartTime_Field,
|
|
rollup_interval Rollup_Interval_Field,
|
|
rollup_data_type Rollup_DataType_Field) (
|
|
rollup *Rollup, err error) {
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := rollup_node_id.value()
|
|
__start_time_val := rollup_start_time.value()
|
|
__interval_val := rollup_interval.value()
|
|
__data_type_val := rollup_data_type.value()
|
|
__created_at_val := __now
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO rollups ( node_id, start_time, interval, data_type, created_at, updated_at ) VALUES ( ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __start_time_val, __interval_val, __data_type_val, __created_at_val, __updated_at_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __node_id_val, __start_time_val, __interval_val, __data_type_val, __created_at_val, __updated_at_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastRollup(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_Raw(ctx context.Context,
|
|
raw_node_id Raw_NodeId_Field,
|
|
raw_interval_end_time Raw_IntervalEndTime_Field,
|
|
raw_data_total Raw_DataTotal_Field,
|
|
raw_data_type Raw_DataType_Field) (
|
|
raw *Raw, err error) {
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__node_id_val := raw_node_id.value()
|
|
__interval_end_time_val := raw_interval_end_time.value()
|
|
__data_total_val := raw_data_total.value()
|
|
__data_type_val := raw_data_type.value()
|
|
__created_at_val := __now
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO raws ( node_id, interval_end_time, data_total, data_type, created_at, updated_at ) VALUES ( ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val, __updated_at_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __node_id_val, __interval_end_time_val, __data_total_val, __data_type_val, __created_at_val, __updated_at_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastRaw(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_audit_success_ratio Node_AuditSuccessRatio_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_uptime_ratio Node_UptimeRatio_Field) (
|
|
node *Node, err error) {
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
__id_val := node_id.value()
|
|
__audit_success_count_val := node_audit_success_count.value()
|
|
__total_audit_count_val := node_total_audit_count.value()
|
|
__audit_success_ratio_val := node_audit_success_ratio.value()
|
|
__uptime_success_count_val := node_uptime_success_count.value()
|
|
__total_uptime_count_val := node_total_uptime_count.value()
|
|
__uptime_ratio_val := node_uptime_ratio.value()
|
|
__created_at_val := __now
|
|
__updated_at_val := __now
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __id_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastNode(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Create_OverlayCacheNode(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field,
|
|
overlay_cache_node_value OverlayCacheNode_Value_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
__key_val := overlay_cache_node_key.value()
|
|
__value_val := overlay_cache_node_value.value()
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("INSERT INTO overlay_cache_nodes ( key, value ) VALUES ( ?, ? )")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __key_val, __value_val)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __key_val, __value_val)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
__pk, err := __res.LastInsertId()
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return obj.getLastOverlayCacheNode(ctx, __pk)
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Bwagreement_By_Signature(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field) (
|
|
bwagreement *Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements WHERE bwagreements.signature = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bwagreement_signature.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
bwagreement = &Bwagreement{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bwagreement, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_Bwagreement(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bwagreement := &Bwagreement{}
|
|
err = __rows.Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bwagreement)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Bwagreement(ctx context.Context) (
|
|
rows []*Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bwagreement := &Bwagreement{}
|
|
err = __rows.Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bwagreement)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Bwagreement_By_CreatedAt_Greater(ctx context.Context,
|
|
bwagreement_created_at_greater Bwagreement_CreatedAt_Field) (
|
|
rows []*Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements WHERE bwagreements.created_at > ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bwagreement_created_at_greater.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
bwagreement := &Bwagreement{}
|
|
err = __rows.Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, bwagreement)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
irreparabledb = &Irreparabledb{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return irreparabledb, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Find_Timestamps_Value_By_Name(ctx context.Context,
|
|
timestamps_name Timestamps_Name_Field) (
|
|
row *Value_Row, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT timestamps.value FROM timestamps WHERE timestamps.name = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, timestamps_name.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
row = &Value_Row{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&row.Value)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return row, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field) (
|
|
rollup *Rollup, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at FROM rollups WHERE rollups.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, rollup_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
rollup = &Rollup{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rollup, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Rollup_By_NodeId(ctx context.Context,
|
|
rollup_node_id Rollup_NodeId_Field) (
|
|
rows []*Rollup, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at FROM rollups WHERE rollups.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, rollup_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
rollup := &Rollup{}
|
|
err = __rows.Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, rollup)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field) (
|
|
raw *Raw, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at FROM raws WHERE raws.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, raw_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
raw = &Raw{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return raw, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) All_Raw_By_NodeId(ctx context.Context,
|
|
raw_node_id Raw_NodeId_Field) (
|
|
rows []*Raw, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at FROM raws WHERE raws.node_id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, raw_node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
raw := &Raw{}
|
|
err = __rows.Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, raw)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Get_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.key, overlay_cache_nodes.value FROM overlay_cache_nodes WHERE overlay_cache_nodes.key = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_key.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
err = obj.driver.QueryRow(__stmt, __values...).Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Limited_OverlayCacheNode_By_Key_GreaterOrEqual(ctx context.Context,
|
|
overlay_cache_node_key_greater_or_equal OverlayCacheNode_Key_Field,
|
|
limit int, offset int64) (
|
|
rows []*OverlayCacheNode, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.key, overlay_cache_nodes.value FROM overlay_cache_nodes WHERE overlay_cache_nodes.key >= ? LIMIT ? OFFSET ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_key_greater_or_equal.value())
|
|
|
|
__values = append(__values, limit, offset)
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__rows, err := obj.driver.Query(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
defer __rows.Close()
|
|
|
|
for __rows.Next() {
|
|
overlay_cache_node := &OverlayCacheNode{}
|
|
err = __rows.Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
rows = append(rows, overlay_cache_node)
|
|
}
|
|
if err := __rows.Err(); err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rows, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE irreparabledbs SET "), __sets, __sqlbundle_Literal(" WHERE irreparabledbs.segmentpath = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Segmentdetail._set {
|
|
__values = append(__values, update.Segmentdetail.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("segmentdetail = ?"))
|
|
}
|
|
|
|
if update.PiecesLostCount._set {
|
|
__values = append(__values, update.PiecesLostCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("pieces_lost_count = ?"))
|
|
}
|
|
|
|
if update.SegDamagedUnixSec._set {
|
|
__values = append(__values, update.SegDamagedUnixSec.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("seg_damaged_unix_sec = ?"))
|
|
}
|
|
|
|
if update.RepairAttemptCount._set {
|
|
__values = append(__values, update.RepairAttemptCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("repair_attempt_count = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, irreparabledb_segmentpath.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
irreparabledb = &Irreparabledb{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return irreparabledb, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Timestamps_By_Name(ctx context.Context,
|
|
timestamps_name Timestamps_Name_Field,
|
|
update Timestamps_Update_Fields) (
|
|
timestamps *Timestamps, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE timestamps SET "), __sets, __sqlbundle_Literal(" WHERE timestamps.name = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, timestamps_name.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
timestamps = &Timestamps{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT timestamps.name, timestamps.value FROM timestamps WHERE timestamps.name = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(×tamps.Name, ×tamps.Value)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return timestamps, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field,
|
|
update Rollup_Update_Fields) (
|
|
rollup *Rollup, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE rollups SET "), __sets, __sqlbundle_Literal(" WHERE rollups.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, rollup_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
rollup = &Rollup{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at FROM rollups WHERE rollups.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rollup, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field,
|
|
update Raw_Update_Fields) (
|
|
raw *Raw, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE raws SET "), __sets, __sqlbundle_Literal(" WHERE raws.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, raw_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
raw = &Raw{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at FROM raws WHERE raws.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return raw, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.AuditSuccessCount._set {
|
|
__values = append(__values, update.AuditSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalAuditCount._set {
|
|
__values = append(__values, update.TotalAuditCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_audit_count = ?"))
|
|
}
|
|
|
|
if update.AuditSuccessRatio._set {
|
|
__values = append(__values, update.AuditSuccessRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("audit_success_ratio = ?"))
|
|
}
|
|
|
|
if update.UptimeSuccessCount._set {
|
|
__values = append(__values, update.UptimeSuccessCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_success_count = ?"))
|
|
}
|
|
|
|
if update.TotalUptimeCount._set {
|
|
__values = append(__values, update.TotalUptimeCount.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("total_uptime_count = ?"))
|
|
}
|
|
|
|
if update.UptimeRatio._set {
|
|
__values = append(__values, update.UptimeRatio.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("uptime_ratio = ?"))
|
|
}
|
|
|
|
__now := obj.db.Hooks.Now().UTC()
|
|
|
|
__values = append(__values, __now)
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("updated_at = ?"))
|
|
|
|
__args = append(__args, node_id.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
node = &Node{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Update_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var __sets = &__sqlbundle_Hole{}
|
|
|
|
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE overlay_cache_nodes SET "), __sets, __sqlbundle_Literal(" WHERE overlay_cache_nodes.key = ?")}}
|
|
|
|
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
|
var __values []interface{}
|
|
var __args []interface{}
|
|
|
|
if update.Value._set {
|
|
__values = append(__values, update.Value.value())
|
|
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("value = ?"))
|
|
}
|
|
|
|
if len(__sets_sql.SQLs) == 0 {
|
|
return nil, emptyUpdate()
|
|
}
|
|
|
|
__args = append(__args, overlay_cache_node_key.value())
|
|
|
|
__values = append(__values, __args...)
|
|
__sets.SQL = __sets_sql
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
_, err = obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
|
|
var __embed_stmt_get = __sqlbundle_Literal("SELECT overlay_cache_nodes.key, overlay_cache_nodes.value FROM overlay_cache_nodes WHERE overlay_cache_nodes.key = ?")
|
|
|
|
var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get)
|
|
obj.logStmt("(IMPLIED) "+__stmt_get, __args...)
|
|
|
|
err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Bwagreement_By_Signature(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM bwagreements WHERE bwagreements.signature = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, bwagreement_signature.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM irreparabledbs WHERE irreparabledbs.segmentpath = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, irreparabledb_segmentpath.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM rollups WHERE rollups.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, rollup_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM raws WHERE raws.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, raw_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM nodes WHERE nodes.id = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, node_id.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) Delete_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field) (
|
|
deleted bool, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("DELETE FROM overlay_cache_nodes WHERE overlay_cache_nodes.key = ?")
|
|
|
|
var __values []interface{}
|
|
__values = append(__values, overlay_cache_node_key.value())
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, __values...)
|
|
|
|
__res, err := obj.driver.Exec(__stmt, __values...)
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err := __res.RowsAffected()
|
|
if err != nil {
|
|
return false, obj.makeErr(err)
|
|
}
|
|
|
|
return __count > 0, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastBwagreement(ctx context.Context,
|
|
pk int64) (
|
|
bwagreement *Bwagreement, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT bwagreements.signature, bwagreements.data, bwagreements.created_at FROM bwagreements WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
bwagreement = &Bwagreement{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(&bwagreement.Signature, &bwagreement.Data, &bwagreement.CreatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return bwagreement, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastIrreparabledb(ctx context.Context,
|
|
pk int64) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT irreparabledbs.segmentpath, irreparabledbs.segmentdetail, irreparabledbs.pieces_lost_count, irreparabledbs.seg_damaged_unix_sec, irreparabledbs.repair_attempt_count FROM irreparabledbs WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
irreparabledb = &Irreparabledb{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(&irreparabledb.Segmentpath, &irreparabledb.Segmentdetail, &irreparabledb.PiecesLostCount, &irreparabledb.SegDamagedUnixSec, &irreparabledb.RepairAttemptCount)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return irreparabledb, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastTimestamps(ctx context.Context,
|
|
pk int64) (
|
|
timestamps *Timestamps, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT timestamps.name, timestamps.value FROM timestamps WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
timestamps = &Timestamps{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(×tamps.Name, ×tamps.Value)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return timestamps, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastRollup(ctx context.Context,
|
|
pk int64) (
|
|
rollup *Rollup, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT rollups.id, rollups.node_id, rollups.start_time, rollups.interval, rollups.data_type, rollups.created_at, rollups.updated_at FROM rollups WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
rollup = &Rollup{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(&rollup.Id, &rollup.NodeId, &rollup.StartTime, &rollup.Interval, &rollup.DataType, &rollup.CreatedAt, &rollup.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return rollup, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastRaw(ctx context.Context,
|
|
pk int64) (
|
|
raw *Raw, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT raws.id, raws.node_id, raws.interval_end_time, raws.data_total, raws.data_type, raws.created_at, raws.updated_at FROM raws WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
raw = &Raw{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(&raw.Id, &raw.NodeId, &raw.IntervalEndTime, &raw.DataTotal, &raw.DataType, &raw.CreatedAt, &raw.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return raw, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastNode(ctx context.Context,
|
|
pk int64) (
|
|
node *Node, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at FROM nodes WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
node = &Node{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(&node.Id, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return node, nil
|
|
|
|
}
|
|
|
|
func (obj *sqlite3Impl) getLastOverlayCacheNode(ctx context.Context,
|
|
pk int64) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
|
|
var __embed_stmt = __sqlbundle_Literal("SELECT overlay_cache_nodes.key, overlay_cache_nodes.value FROM overlay_cache_nodes WHERE _rowid_ = ?")
|
|
|
|
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
|
|
obj.logStmt(__stmt, pk)
|
|
|
|
overlay_cache_node = &OverlayCacheNode{}
|
|
err = obj.driver.QueryRow(__stmt, pk).Scan(&overlay_cache_node.Key, &overlay_cache_node.Value)
|
|
if err != nil {
|
|
return nil, obj.makeErr(err)
|
|
}
|
|
return overlay_cache_node, nil
|
|
|
|
}
|
|
|
|
func (impl sqlite3Impl) isConstraintError(err error) (
|
|
constraint string, ok bool) {
|
|
if e, ok := err.(sqlite3.Error); ok {
|
|
if e.Code == sqlite3.ErrConstraint {
|
|
msg := err.Error()
|
|
colon := strings.LastIndex(msg, ":")
|
|
if colon != -1 {
|
|
return strings.TrimSpace(msg[colon:]), true
|
|
}
|
|
return "", true
|
|
}
|
|
}
|
|
return "", false
|
|
}
|
|
|
|
func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error) {
|
|
var __res sql.Result
|
|
var __count int64
|
|
__res, err = obj.driver.Exec("DELETE FROM timestamps;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM rollups;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM raws;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM overlay_cache_nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM nodes;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM irreparabledbs;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
__res, err = obj.driver.Exec("DELETE FROM bwagreements;")
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
|
|
__count, err = __res.RowsAffected()
|
|
if err != nil {
|
|
return 0, obj.makeErr(err)
|
|
}
|
|
count += __count
|
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
// Rx runs a sequence of generated-API calls inside a single transaction.
// The transaction is opened lazily on first use and lives until Commit or
// Rollback is called.
type Rx struct {
	db *DB // source used to open the transaction
	tx *Tx // transaction in progress; nil until first use
}
|
|
|
|
func (rx *Rx) UnsafeTx(ctx context.Context) (unsafe_tx *sql.Tx, err error) {
|
|
tx, err := rx.getTx(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return tx.Tx, nil
|
|
}
|
|
|
|
func (rx *Rx) getTx(ctx context.Context) (tx *Tx, err error) {
|
|
if rx.tx == nil {
|
|
if rx.tx, err = rx.db.Open(ctx); err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
return rx.tx, nil
|
|
}
|
|
|
|
func (rx *Rx) Rebind(s string) string {
|
|
return rx.db.Rebind(s)
|
|
}
|
|
|
|
func (rx *Rx) Commit() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Commit()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) Rollback() (err error) {
|
|
if rx.tx != nil {
|
|
err = rx.tx.Rollback()
|
|
rx.tx = nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
func (rx *Rx) All_Bwagreement(ctx context.Context) (
|
|
rows []*Bwagreement, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Bwagreement(ctx)
|
|
}
|
|
|
|
func (rx *Rx) All_Bwagreement_By_CreatedAt_Greater(ctx context.Context,
|
|
bwagreement_created_at_greater Bwagreement_CreatedAt_Field) (
|
|
rows []*Bwagreement, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Bwagreement_By_CreatedAt_Greater(ctx, bwagreement_created_at_greater)
|
|
}
|
|
|
|
func (rx *Rx) All_Raw_By_NodeId(ctx context.Context,
|
|
raw_node_id Raw_NodeId_Field) (
|
|
rows []*Raw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Raw_By_NodeId(ctx, raw_node_id)
|
|
}
|
|
|
|
func (rx *Rx) All_Rollup_By_NodeId(ctx context.Context,
|
|
rollup_node_id Rollup_NodeId_Field) (
|
|
rows []*Rollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.All_Rollup_By_NodeId(ctx, rollup_node_id)
|
|
}
|
|
|
|
func (rx *Rx) Create_Bwagreement(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field,
|
|
bwagreement_data Bwagreement_Data_Field) (
|
|
bwagreement *Bwagreement, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Bwagreement(ctx, bwagreement_signature, bwagreement_data)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Irreparabledb(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
|
|
irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
|
|
irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
|
|
irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Irreparabledb(ctx, irreparabledb_segmentpath, irreparabledb_segmentdetail, irreparabledb_pieces_lost_count, irreparabledb_seg_damaged_unix_sec, irreparabledb_repair_attempt_count)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Node(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
node_audit_success_count Node_AuditSuccessCount_Field,
|
|
node_total_audit_count Node_TotalAuditCount_Field,
|
|
node_audit_success_ratio Node_AuditSuccessRatio_Field,
|
|
node_uptime_success_count Node_UptimeSuccessCount_Field,
|
|
node_total_uptime_count Node_TotalUptimeCount_Field,
|
|
node_uptime_ratio Node_UptimeRatio_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Node(ctx, node_id, node_audit_success_count, node_total_audit_count, node_audit_success_ratio, node_uptime_success_count, node_total_uptime_count, node_uptime_ratio)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_OverlayCacheNode(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field,
|
|
overlay_cache_node_value OverlayCacheNode_Value_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_OverlayCacheNode(ctx, overlay_cache_node_key, overlay_cache_node_value)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Raw(ctx context.Context,
|
|
raw_node_id Raw_NodeId_Field,
|
|
raw_interval_end_time Raw_IntervalEndTime_Field,
|
|
raw_data_total Raw_DataTotal_Field,
|
|
raw_data_type Raw_DataType_Field) (
|
|
raw *Raw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Raw(ctx, raw_node_id, raw_interval_end_time, raw_data_total, raw_data_type)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Rollup(ctx context.Context,
|
|
rollup_node_id Rollup_NodeId_Field,
|
|
rollup_start_time Rollup_StartTime_Field,
|
|
rollup_interval Rollup_Interval_Field,
|
|
rollup_data_type Rollup_DataType_Field) (
|
|
rollup *Rollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Rollup(ctx, rollup_node_id, rollup_start_time, rollup_interval, rollup_data_type)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Create_Timestamps(ctx context.Context,
|
|
timestamps_name Timestamps_Name_Field,
|
|
timestamps_value Timestamps_Value_Field) (
|
|
timestamps *Timestamps, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Create_Timestamps(ctx, timestamps_name, timestamps_value)
|
|
|
|
}
|
|
|
|
func (rx *Rx) Delete_Bwagreement_By_Signature(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Bwagreement_By_Signature(ctx, bwagreement_signature)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_OverlayCacheNode_By_Key(ctx, overlay_cache_node_key)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Raw_By_Id(ctx, raw_id)
|
|
}
|
|
|
|
func (rx *Rx) Delete_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field) (
|
|
deleted bool, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Delete_Rollup_By_Id(ctx, rollup_id)
|
|
}
|
|
|
|
func (rx *Rx) Find_Timestamps_Value_By_Name(ctx context.Context,
|
|
timestamps_name Timestamps_Name_Field) (
|
|
row *Value_Row, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Find_Timestamps_Value_By_Name(ctx, timestamps_name)
|
|
}
|
|
|
|
func (rx *Rx) Get_Bwagreement_By_Signature(ctx context.Context,
|
|
bwagreement_signature Bwagreement_Signature_Field) (
|
|
bwagreement *Bwagreement, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Bwagreement_By_Signature(ctx, bwagreement_signature)
|
|
}
|
|
|
|
func (rx *Rx) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath)
|
|
}
|
|
|
|
func (rx *Rx) Get_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Node_By_Id(ctx, node_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_OverlayCacheNode_By_Key(ctx, overlay_cache_node_key)
|
|
}
|
|
|
|
func (rx *Rx) Get_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field) (
|
|
raw *Raw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Raw_By_Id(ctx, raw_id)
|
|
}
|
|
|
|
func (rx *Rx) Get_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field) (
|
|
rollup *Rollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Get_Rollup_By_Id(ctx, rollup_id)
|
|
}
|
|
|
|
func (rx *Rx) Limited_Bwagreement(ctx context.Context,
|
|
limit int, offset int64) (
|
|
rows []*Bwagreement, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_Bwagreement(ctx, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Limited_OverlayCacheNode_By_Key_GreaterOrEqual(ctx context.Context,
|
|
overlay_cache_node_key_greater_or_equal OverlayCacheNode_Key_Field,
|
|
limit int, offset int64) (
|
|
rows []*OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Limited_OverlayCacheNode_By_Key_GreaterOrEqual(ctx, overlay_cache_node_key_greater_or_equal, limit, offset)
|
|
}
|
|
|
|
func (rx *Rx) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
|
|
irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
|
|
update Irreparabledb_Update_Fields) (
|
|
irreparabledb *Irreparabledb, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Irreparabledb_By_Segmentpath(ctx, irreparabledb_segmentpath, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Node_By_Id(ctx context.Context,
|
|
node_id Node_Id_Field,
|
|
update Node_Update_Fields) (
|
|
node *Node, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Node_By_Id(ctx, node_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_OverlayCacheNode_By_Key(ctx context.Context,
|
|
overlay_cache_node_key OverlayCacheNode_Key_Field,
|
|
update OverlayCacheNode_Update_Fields) (
|
|
overlay_cache_node *OverlayCacheNode, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_OverlayCacheNode_By_Key(ctx, overlay_cache_node_key, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Raw_By_Id(ctx context.Context,
|
|
raw_id Raw_Id_Field,
|
|
update Raw_Update_Fields) (
|
|
raw *Raw, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Raw_By_Id(ctx, raw_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Rollup_By_Id(ctx context.Context,
|
|
rollup_id Rollup_Id_Field,
|
|
update Rollup_Update_Fields) (
|
|
rollup *Rollup, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Rollup_By_Id(ctx, rollup_id, update)
|
|
}
|
|
|
|
func (rx *Rx) Update_Timestamps_By_Name(ctx context.Context,
|
|
timestamps_name Timestamps_Name_Field,
|
|
update Timestamps_Update_Fields) (
|
|
timestamps *Timestamps, err error) {
|
|
var tx *Tx
|
|
if tx, err = rx.getTx(ctx); err != nil {
|
|
return
|
|
}
|
|
return tx.Update_Timestamps_By_Name(ctx, timestamps_name, update)
|
|
}
|
|
|
|
// Methods is the generated query surface shared by both the database
// handle (DBMethods) and transactions (TxMethods). It groups the
// dbx-generated accessors: All_* list queries, Create_* inserts,
// Delete_* removals, Find_*/Get_* single-row reads, Limited_* paginated
// reads, and Update_* mutations that return the updated row.
type Methods interface {
	// All_Bwagreement returns every bandwidth agreement row.
	All_Bwagreement(ctx context.Context) (
		rows []*Bwagreement, err error)

	// All_Bwagreement_By_CreatedAt_Greater returns bandwidth agreements
	// created strictly after the given time.
	All_Bwagreement_By_CreatedAt_Greater(ctx context.Context,
		bwagreement_created_at_greater Bwagreement_CreatedAt_Field) (
		rows []*Bwagreement, err error)

	// All_Raw_By_NodeId returns the raw accounting rows for one node.
	All_Raw_By_NodeId(ctx context.Context,
		raw_node_id Raw_NodeId_Field) (
		rows []*Raw, err error)

	// All_Rollup_By_NodeId returns the rollup rows for one node.
	All_Rollup_By_NodeId(ctx context.Context,
		rollup_node_id Rollup_NodeId_Field) (
		rows []*Rollup, err error)

	// Create_Bwagreement inserts a bandwidth agreement and returns it.
	Create_Bwagreement(ctx context.Context,
		bwagreement_signature Bwagreement_Signature_Field,
		bwagreement_data Bwagreement_Data_Field) (
		bwagreement *Bwagreement, err error)

	// Create_Irreparabledb inserts an irreparable-segment record and
	// returns it.
	Create_Irreparabledb(ctx context.Context,
		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
		irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field,
		irreparabledb_pieces_lost_count Irreparabledb_PiecesLostCount_Field,
		irreparabledb_seg_damaged_unix_sec Irreparabledb_SegDamagedUnixSec_Field,
		irreparabledb_repair_attempt_count Irreparabledb_RepairAttemptCount_Field) (
		irreparabledb *Irreparabledb, err error)

	// Create_Node inserts a node statistics record and returns it.
	Create_Node(ctx context.Context,
		node_id Node_Id_Field,
		node_audit_success_count Node_AuditSuccessCount_Field,
		node_total_audit_count Node_TotalAuditCount_Field,
		node_audit_success_ratio Node_AuditSuccessRatio_Field,
		node_uptime_success_count Node_UptimeSuccessCount_Field,
		node_total_uptime_count Node_TotalUptimeCount_Field,
		node_uptime_ratio Node_UptimeRatio_Field) (
		node *Node, err error)

	// Create_OverlayCacheNode inserts an overlay cache entry and
	// returns it.
	Create_OverlayCacheNode(ctx context.Context,
		overlay_cache_node_key OverlayCacheNode_Key_Field,
		overlay_cache_node_value OverlayCacheNode_Value_Field) (
		overlay_cache_node *OverlayCacheNode, err error)

	// Create_Raw inserts a raw accounting record and returns it.
	Create_Raw(ctx context.Context,
		raw_node_id Raw_NodeId_Field,
		raw_interval_end_time Raw_IntervalEndTime_Field,
		raw_data_total Raw_DataTotal_Field,
		raw_data_type Raw_DataType_Field) (
		raw *Raw, err error)

	// Create_Rollup inserts a rollup record and returns it.
	Create_Rollup(ctx context.Context,
		rollup_node_id Rollup_NodeId_Field,
		rollup_start_time Rollup_StartTime_Field,
		rollup_interval Rollup_Interval_Field,
		rollup_data_type Rollup_DataType_Field) (
		rollup *Rollup, err error)

	// Create_Timestamps inserts a named timestamp and returns it.
	Create_Timestamps(ctx context.Context,
		timestamps_name Timestamps_Name_Field,
		timestamps_value Timestamps_Value_Field) (
		timestamps *Timestamps, err error)

	// Delete_Bwagreement_By_Signature removes the bandwidth agreement
	// with the given signature; deleted reports whether a row existed.
	Delete_Bwagreement_By_Signature(ctx context.Context,
		bwagreement_signature Bwagreement_Signature_Field) (
		deleted bool, err error)

	// Delete_Irreparabledb_By_Segmentpath removes the irreparable
	// record for the given segment path.
	Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
		deleted bool, err error)

	// Delete_Node_By_Id removes the node record with the given id.
	Delete_Node_By_Id(ctx context.Context,
		node_id Node_Id_Field) (
		deleted bool, err error)

	// Delete_OverlayCacheNode_By_Key removes the overlay cache entry
	// with the given key.
	Delete_OverlayCacheNode_By_Key(ctx context.Context,
		overlay_cache_node_key OverlayCacheNode_Key_Field) (
		deleted bool, err error)

	// Delete_Raw_By_Id removes the raw accounting row with the given id.
	Delete_Raw_By_Id(ctx context.Context,
		raw_id Raw_Id_Field) (
		deleted bool, err error)

	// Delete_Rollup_By_Id removes the rollup row with the given id.
	Delete_Rollup_By_Id(ctx context.Context,
		rollup_id Rollup_Id_Field) (
		deleted bool, err error)

	// Find_Timestamps_Value_By_Name returns only the value column of
	// the named timestamp row.
	Find_Timestamps_Value_By_Name(ctx context.Context,
		timestamps_name Timestamps_Name_Field) (
		row *Value_Row, err error)

	// Get_Bwagreement_By_Signature returns the bandwidth agreement with
	// the given signature.
	Get_Bwagreement_By_Signature(ctx context.Context,
		bwagreement_signature Bwagreement_Signature_Field) (
		bwagreement *Bwagreement, err error)

	// Get_Irreparabledb_By_Segmentpath returns the irreparable record
	// for the given segment path.
	Get_Irreparabledb_By_Segmentpath(ctx context.Context,
		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
		irreparabledb *Irreparabledb, err error)

	// Get_Node_By_Id returns the node record with the given id.
	Get_Node_By_Id(ctx context.Context,
		node_id Node_Id_Field) (
		node *Node, err error)

	// Get_OverlayCacheNode_By_Key returns the overlay cache entry with
	// the given key.
	Get_OverlayCacheNode_By_Key(ctx context.Context,
		overlay_cache_node_key OverlayCacheNode_Key_Field) (
		overlay_cache_node *OverlayCacheNode, err error)

	// Get_Raw_By_Id returns the raw accounting row with the given id.
	Get_Raw_By_Id(ctx context.Context,
		raw_id Raw_Id_Field) (
		raw *Raw, err error)

	// Get_Rollup_By_Id returns the rollup row with the given id.
	Get_Rollup_By_Id(ctx context.Context,
		rollup_id Rollup_Id_Field) (
		rollup *Rollup, err error)

	// Limited_Bwagreement returns a limit/offset page of bandwidth
	// agreements.
	Limited_Bwagreement(ctx context.Context,
		limit int, offset int64) (
		rows []*Bwagreement, err error)

	// Limited_OverlayCacheNode_By_Key_GreaterOrEqual returns a
	// limit/offset page of overlay cache entries with key >= the given
	// key.
	Limited_OverlayCacheNode_By_Key_GreaterOrEqual(ctx context.Context,
		overlay_cache_node_key_greater_or_equal OverlayCacheNode_Key_Field,
		limit int, offset int64) (
		rows []*OverlayCacheNode, err error)

	// Update_Irreparabledb_By_Segmentpath applies update to the matching
	// irreparable record and returns the updated row.
	Update_Irreparabledb_By_Segmentpath(ctx context.Context,
		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
		update Irreparabledb_Update_Fields) (
		irreparabledb *Irreparabledb, err error)

	// Update_Node_By_Id applies update to the matching node record and
	// returns the updated row.
	Update_Node_By_Id(ctx context.Context,
		node_id Node_Id_Field,
		update Node_Update_Fields) (
		node *Node, err error)

	// Update_OverlayCacheNode_By_Key applies update to the matching
	// overlay cache entry and returns the updated row.
	Update_OverlayCacheNode_By_Key(ctx context.Context,
		overlay_cache_node_key OverlayCacheNode_Key_Field,
		update OverlayCacheNode_Update_Fields) (
		overlay_cache_node *OverlayCacheNode, err error)

	// Update_Raw_By_Id applies update to the matching raw accounting row
	// and returns the updated row.
	Update_Raw_By_Id(ctx context.Context,
		raw_id Raw_Id_Field,
		update Raw_Update_Fields) (
		raw *Raw, err error)

	// Update_Rollup_By_Id applies update to the matching rollup row and
	// returns the updated row.
	Update_Rollup_By_Id(ctx context.Context,
		rollup_id Rollup_Id_Field,
		update Rollup_Update_Fields) (
		rollup *Rollup, err error)

	// Update_Timestamps_By_Name applies update to the matching named
	// timestamp and returns the updated row.
	Update_Timestamps_By_Name(ctx context.Context,
		timestamps_name Timestamps_Name_Field,
		update Timestamps_Update_Fields) (
		timestamps *Timestamps, err error)
}
|
|
|
|
// TxMethods is the public transaction interface: all generated query
// Methods plus SQL placeholder rebinding and transaction control.
type TxMethods interface {
	Methods

	// Rebind rewrites placeholder syntax in s for the active driver.
	Rebind(s string) string
	// Commit commits the transaction.
	Commit() error
	// Rollback aborts the transaction.
	Rollback() error
}
|
|
|
|
// txMethods is the package-private transaction interface; it extends
// TxMethods with helpers used internally by the generated code.
type txMethods interface {
	TxMethods

	// deleteAll removes every row from every table and reports the count.
	deleteAll(ctx context.Context) (int64, error)
	// makeErr wraps a driver error into this package's *Error type.
	makeErr(err error) error
}
|
|
|
|
// DBMethods is the public database-handle interface: all generated query
// Methods plus schema access and SQL placeholder rebinding.
type DBMethods interface {
	Methods

	// Schema returns the DDL for this database's schema.
	Schema() string
	// Rebind rewrites placeholder syntax in sql for the active driver.
	Rebind(sql string) string
}
|
|
|
|
// dbMethods is the package-private database-handle interface; it extends
// DBMethods with helpers used internally by the generated code.
type dbMethods interface {
	DBMethods

	// wrapTx adapts a raw *sql.Tx into this package's transaction type.
	wrapTx(tx *sql.Tx) txMethods
	// makeErr wraps a driver error into this package's *Error type.
	makeErr(err error) error
}
|
|
|
|
// openpostgres opens a database handle through the registered "postgres"
// driver. Note sql.Open validates the driver name but does not connect.
func openpostgres(source string) (*sql.DB, error) {
	db, err := sql.Open("postgres", source)
	if err != nil {
		return nil, err
	}
	return db, nil
}
|
|
|
|
// sqlite3DriverName is a process-unique driver name under which the
// sqlite3 driver (with our connect hook) is registered, so multiple
// packages can register their own sqlite3 variants without colliding.
var sqlite3DriverName = func() string {
	var id [16]byte
	// math/rand.Read always succeeds (err is documented to be nil),
	// so the error is safe to ignore here.
	rand.Read(id[:])
	// %x formats a []byte as lowercase hex directly; the previous
	// string(id[:]) conversion was a redundant copy.
	return fmt.Sprintf("sqlite3_%x", id[:])
}()
|
|
|
|
func init() {
|
|
sql.Register(sqlite3DriverName, &sqlite3.SQLiteDriver{
|
|
ConnectHook: sqlite3SetupConn,
|
|
})
|
|
}
|
|
|
|
// SQLite3JournalMode controls the journal_mode pragma for all new connections;
// it is applied per-connection by sqlite3SetupConn.
// Since it is read without a mutex, it must be changed to the value you want
// before any Open calls.
var SQLite3JournalMode = "WAL"
|
|
|
|
func sqlite3SetupConn(conn *sqlite3.SQLiteConn) (err error) {
|
|
_, err = conn.Exec("PRAGMA foreign_keys = ON", nil)
|
|
if err != nil {
|
|
return makeErr(err)
|
|
}
|
|
_, err = conn.Exec("PRAGMA journal_mode = "+SQLite3JournalMode, nil)
|
|
if err != nil {
|
|
return makeErr(err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func opensqlite3(source string) (*sql.DB, error) {
|
|
return sql.Open(sqlite3DriverName, source)
|
|
}
|