From cc020dfdea61eb52885e370424a4c6944d8d883b Mon Sep 17 00:00:00 2001 From: Maximillian von Briesen Date: Thu, 16 May 2019 10:11:15 -0400 Subject: [PATCH] Create containment mode database table and migrate scripts (#1970) --- pkg/overlay/cache.go | 1 + satellite/satellitedb/dbx/satellitedb.dbx | 22 + satellite/satellitedb/dbx/satellitedb.dbx.go | 588 +++++++++++++++++- .../dbx/satellitedb.dbx.postgres.sql | 10 + .../dbx/satellitedb.dbx.sqlite3.sql | 10 + satellite/satellitedb/migrate.go | 19 + satellite/satellitedb/overlaycache.go | 2 + .../satellitedb/testdata/postgres.v20.sql | 254 ++++++++ 8 files changed, 881 insertions(+), 25 deletions(-) create mode 100644 satellite/satellitedb/testdata/postgres.v20.sql diff --git a/pkg/overlay/cache.go b/pkg/overlay/cache.go index 5af48b444..9e2f14803 100644 --- a/pkg/overlay/cache.go +++ b/pkg/overlay/cache.go @@ -95,6 +95,7 @@ type NodeDossier struct { Capacity pb.NodeCapacity Reputation NodeStats Version pb.NodeVersion + Contained bool } // NodeStats contains statistics about a node. diff --git a/satellite/satellitedb/dbx/satellitedb.dbx b/satellite/satellitedb/dbx/satellitedb.dbx index 63e8b2842..a20c95d00 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx +++ b/satellite/satellitedb/dbx/satellitedb.dbx @@ -1,5 +1,25 @@ // dbx.v1 golang satellitedb.dbx . +//--- containment ---// +model pending_audits ( + key node_id + + field node_id blob + field piece_id blob + field stripe_index int64 + field share_size int64 + field expected_share_hash blob + field reverify_count int64 ( updatable ) +) + +create pending_audits ( ) +update pending_audits ( where pending_audits.node_id = ? ) +delete pending_audits ( where pending_audits.node_id = ? ) +read one ( + select pending_audits + where pending_audits.node_id = ? 
+) + //--- bwagreement ---// model bwagreement ( @@ -119,6 +139,8 @@ model node ( field updated_at timestamp ( autoinsert, autoupdate ) field last_contact_success timestamp ( updatable ) field last_contact_failure timestamp ( updatable ) + + field contained bool ( updatable ) ) create node ( ) diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index 639fe7115..1f19427c3 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -383,8 +383,18 @@ CREATE TABLE nodes ( updated_at timestamp with time zone NOT NULL, last_contact_success timestamp with time zone NOT NULL, last_contact_failure timestamp with time zone NOT NULL, + contained boolean NOT NULL, PRIMARY KEY ( id ) ); +CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); CREATE TABLE projects ( id bytea NOT NULL, name text NOT NULL, @@ -640,8 +650,18 @@ CREATE TABLE nodes ( updated_at TIMESTAMP NOT NULL, last_contact_success TIMESTAMP NOT NULL, last_contact_failure TIMESTAMP NOT NULL, + contained INTEGER NOT NULL, PRIMARY KEY ( id ) ); +CREATE TABLE pending_audits ( + node_id BLOB NOT NULL, + piece_id BLOB NOT NULL, + stripe_index INTEGER NOT NULL, + share_size INTEGER NOT NULL, + expected_share_hash BLOB NOT NULL, + reverify_count INTEGER NOT NULL, + PRIMARY KEY ( node_id ) +); CREATE TABLE projects ( id BLOB NOT NULL, name TEXT NOT NULL, @@ -2072,6 +2092,7 @@ type Node struct { UpdatedAt time.Time LastContactSuccess time.Time LastContactFailure time.Time + Contained bool } func (Node) _Table() string { return "nodes" } @@ -2099,6 +2120,7 @@ type Node_Update_Fields struct { UptimeRatio Node_UptimeRatio_Field LastContactSuccess Node_LastContactSuccess_Field LastContactFailure Node_LastContactFailure_Field + Contained Node_Contained_Field } type Node_Id_Field struct { @@ -2576,6 +2598,154 @@ func (f Node_LastContactFailure_Field) value() interface{} { func (Node_LastContactFailure_Field) _Column() string { return "last_contact_failure" } +type Node_Contained_Field struct { + _set bool + _null bool + _value bool +} + +func Node_Contained(v bool) Node_Contained_Field { + return Node_Contained_Field{_set: true, _value: v} +} + +func (f Node_Contained_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (Node_Contained_Field) _Column() string { return "contained" } + +type PendingAudits struct { + NodeId []byte + PieceId []byte + StripeIndex int64 + ShareSize int64 + ExpectedShareHash []byte + ReverifyCount int64 +} + +func (PendingAudits) _Table() string { return "pending_audits" } + +type PendingAudits_Update_Fields struct { + ReverifyCount PendingAudits_ReverifyCount_Field +} + +type PendingAudits_NodeId_Field struct { + _set bool + _null bool + _value []byte +} + +func PendingAudits_NodeId(v []byte) PendingAudits_NodeId_Field { + return PendingAudits_NodeId_Field{_set: true, _value: v} +} + +func (f PendingAudits_NodeId_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (PendingAudits_NodeId_Field) _Column() string { return "node_id" } + +type PendingAudits_PieceId_Field struct { + _set bool + _null bool + _value []byte +} + +func PendingAudits_PieceId(v []byte) PendingAudits_PieceId_Field { + return PendingAudits_PieceId_Field{_set: true, 
_value: v} +} + +func (f PendingAudits_PieceId_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (PendingAudits_PieceId_Field) _Column() string { return "piece_id" } + +type PendingAudits_StripeIndex_Field struct { + _set bool + _null bool + _value int64 +} + +func PendingAudits_StripeIndex(v int64) PendingAudits_StripeIndex_Field { + return PendingAudits_StripeIndex_Field{_set: true, _value: v} +} + +func (f PendingAudits_StripeIndex_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (PendingAudits_StripeIndex_Field) _Column() string { return "stripe_index" } + +type PendingAudits_ShareSize_Field struct { + _set bool + _null bool + _value int64 +} + +func PendingAudits_ShareSize(v int64) PendingAudits_ShareSize_Field { + return PendingAudits_ShareSize_Field{_set: true, _value: v} +} + +func (f PendingAudits_ShareSize_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (PendingAudits_ShareSize_Field) _Column() string { return "share_size" } + +type PendingAudits_ExpectedShareHash_Field struct { + _set bool + _null bool + _value []byte +} + +func PendingAudits_ExpectedShareHash(v []byte) PendingAudits_ExpectedShareHash_Field { + return PendingAudits_ExpectedShareHash_Field{_set: true, _value: v} +} + +func (f PendingAudits_ExpectedShareHash_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (PendingAudits_ExpectedShareHash_Field) _Column() string { return "expected_share_hash" } + +type PendingAudits_ReverifyCount_Field struct { + _set bool + _null bool + _value int64 +} + +func PendingAudits_ReverifyCount(v int64) PendingAudits_ReverifyCount_Field { + return PendingAudits_ReverifyCount_Field{_set: true, _value: v} +} + +func (f PendingAudits_ReverifyCount_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (PendingAudits_ReverifyCount_Field) _Column() string { return "reverify_count" } + type Project struct { Id []byte Name string @@ -3765,6 +3935,35 @@ type Value_Row struct { Value time.Time } +func (obj *postgresImpl) Create_PendingAudits(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + pending_audits_piece_id PendingAudits_PieceId_Field, + pending_audits_stripe_index PendingAudits_StripeIndex_Field, + pending_audits_share_size PendingAudits_ShareSize_Field, + pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field, + pending_audits_reverify_count PendingAudits_ReverifyCount_Field) ( + pending_audits *PendingAudits, err error) { + __node_id_val := pending_audits_node_id.value() + __piece_id_val := pending_audits_piece_id.value() + __stripe_index_val := pending_audits_stripe_index.value() + __share_size_val := pending_audits_share_size.value() + __expected_share_hash_val := pending_audits_expected_share_hash.value() + __reverify_count_val := pending_audits_reverify_count.value() + + var __embed_stmt = __sqlbundle_Literal("INSERT INTO pending_audits ( node_id, piece_id, stripe_index, share_size, expected_share_hash, reverify_count ) VALUES ( ?, ?, ?, ?, ?, ? 
) RETURNING pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count") + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val) + + pending_audits = &PendingAudits{} + err = obj.driver.QueryRow(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount) + if err != nil { + return nil, obj.makeErr(err) + } + return pending_audits, nil + +} + func (obj *postgresImpl) Create_Irreparabledb(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field, irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field, @@ -3869,7 +4068,8 @@ func (obj *postgresImpl) Create_Node(ctx context.Context, node_total_uptime_count Node_TotalUptimeCount_Field, node_uptime_ratio Node_UptimeRatio_Field, node_last_contact_success Node_LastContactSuccess_Field, - node_last_contact_failure Node_LastContactFailure_Field) ( + node_last_contact_failure Node_LastContactFailure_Field, + node_contained Node_Contained_Field) ( node *Node, err error) { __now := obj.db.Hooks.Now().UTC() @@ -3898,14 +4098,15 @@ func (obj *postgresImpl) Create_Node(ctx context.Context, __updated_at_val := __now __last_contact_success_val := node_last_contact_success.value() __last_contact_failure_val := node_last_contact_failure.value() + __contained_val := node_contained.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, protocol, type, email, wallet, free_bandwidth, free_disk, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at, last_contact_success, last_contact_failure ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, protocol, type, email, wallet, free_bandwidth, free_disk, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at, last_contact_success, last_contact_failure, contained ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) RETURNING nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val) + obj.logStmt(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val) node = &Node{} - err = obj.driver.QueryRow(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = obj.driver.QueryRow(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err != nil { return nil, obj.makeErr(err) } @@ -4239,6 
+4440,27 @@ func (obj *postgresImpl) Create_ResetPasswordToken(ctx context.Context, } +func (obj *postgresImpl) Get_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + pending_audits *PendingAudits, err error) { + + var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count FROM pending_audits WHERE pending_audits.node_id = ?") + + var __values []interface{} + __values = append(__values, pending_audits_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + pending_audits = &PendingAudits{} + err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount) + if err != nil { + return nil, obj.makeErr(err) + } + return pending_audits, nil + +} + func (obj *postgresImpl) Get_Irreparabledb_By_Segmentpath(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) ( irreparabledb *Irreparabledb, err error) { @@ -4377,7 +4599,7 @@ func (obj *postgresImpl) Get_Node_By_Id(ctx context.Context, node_id Node_Id_Field) ( node *Node, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained FROM nodes WHERE nodes.id = ?") var __values []interface{} __values = append(__values, node_id.value()) @@ -4386,7 +4608,7 @@ func (obj *postgresImpl) Get_Node_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
node = &Node{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err != nil { return nil, obj.makeErr(err) } @@ -4431,7 +4653,7 @@ func (obj *postgresImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx co limit int, offset int64) ( rows []*Node, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, node_id_greater_or_equal.value()) @@ -4449,7 +4671,7 @@ func (obj *postgresImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx co for __rows.Next() { node := &Node{} - err = __rows.Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = __rows.Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err != nil { return nil, obj.makeErr(err) } @@ -5251,6 +5473,46 @@ func (obj *postgresImpl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context, } +func (obj *postgresImpl) Update_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + update PendingAudits_Update_Fields) ( + pending_audits *PendingAudits, err error) { + var __sets = &__sqlbundle_Hole{} + + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE pending_audits SET "), __sets, __sqlbundle_Literal(" WHERE pending_audits.node_id = ? RETURNING pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count")}} + + __sets_sql := __sqlbundle_Literals{Join: ", "} + var __values []interface{} + var __args []interface{} + + if update.ReverifyCount._set { + __values = append(__values, update.ReverifyCount.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("reverify_count = ?")) + } + + if len(__sets_sql.SQLs) == 0 { + return nil, emptyUpdate() + } + + __args = append(__args, pending_audits_node_id.value()) + + __values = append(__values, __args...) + __sets.SQL = __sets_sql + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + pending_audits = &PendingAudits{} + err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, obj.makeErr(err) + } + return pending_audits, nil +} + func (obj *postgresImpl) Update_Irreparabledb_By_Segmentpath(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field, update Irreparabledb_Update_Fields) ( @@ -5352,7 +5614,7 @@ func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context, node *Node, err error) { var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? 
RETURNING nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -5468,6 +5730,11 @@ func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context, __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?")) } + if update.Contained._set { + __values = append(__values, update.Contained.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?")) + } + __now := obj.db.Hooks.Now().UTC() __values = append(__values, __now) @@ -5482,7 +5749,7 @@ func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) node = &Node{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err == sql.ErrNoRows { return nil, nil } @@ -5708,6 +5975,32 @@ func (obj *postgresImpl) Update_RegistrationToken_By_Secret(ctx context.Context, return registration_token, nil } +func (obj *postgresImpl) Delete_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + deleted bool, err error) { + + var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?") + + var __values []interface{} + __values = append(__values, pending_audits_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + __res, err := obj.driver.Exec(__stmt, __values...) 
+ if err != nil { + return false, obj.makeErr(err) + } + + __count, err := __res.RowsAffected() + if err != nil { + return false, obj.makeErr(err) + } + + return __count > 0, nil + +} + func (obj *postgresImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) ( deleted bool, err error) { @@ -6129,6 +6422,16 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error) return 0, obj.makeErr(err) } + __count, err = __res.RowsAffected() + if err != nil { + return 0, obj.makeErr(err) + } + count += __count + __res, err = obj.driver.Exec("DELETE FROM pending_audits;") + if err != nil { + return 0, obj.makeErr(err) + } + __count, err = __res.RowsAffected() if err != nil { return 0, obj.makeErr(err) @@ -6239,6 +6542,38 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error) } +func (obj *sqlite3Impl) Create_PendingAudits(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + pending_audits_piece_id PendingAudits_PieceId_Field, + pending_audits_stripe_index PendingAudits_StripeIndex_Field, + pending_audits_share_size PendingAudits_ShareSize_Field, + pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field, + pending_audits_reverify_count PendingAudits_ReverifyCount_Field) ( + pending_audits *PendingAudits, err error) { + __node_id_val := pending_audits_node_id.value() + __piece_id_val := pending_audits_piece_id.value() + __stripe_index_val := pending_audits_stripe_index.value() + __share_size_val := pending_audits_share_size.value() + __expected_share_hash_val := pending_audits_expected_share_hash.value() + __reverify_count_val := pending_audits_reverify_count.value() + + var __embed_stmt = __sqlbundle_Literal("INSERT INTO pending_audits ( node_id, piece_id, stripe_index, share_size, expected_share_hash, reverify_count ) VALUES ( ?, ?, ?, ?, ?, ? 
)") + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val) + + __res, err := obj.driver.Exec(__stmt, __node_id_val, __piece_id_val, __stripe_index_val, __share_size_val, __expected_share_hash_val, __reverify_count_val) + if err != nil { + return nil, obj.makeErr(err) + } + __pk, err := __res.LastInsertId() + if err != nil { + return nil, obj.makeErr(err) + } + return obj.getLastPendingAudits(ctx, __pk) + +} + func (obj *sqlite3Impl) Create_Irreparabledb(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field, irreparabledb_segmentdetail Irreparabledb_Segmentdetail_Field, @@ -6352,7 +6687,8 @@ func (obj *sqlite3Impl) Create_Node(ctx context.Context, node_total_uptime_count Node_TotalUptimeCount_Field, node_uptime_ratio Node_UptimeRatio_Field, node_last_contact_success Node_LastContactSuccess_Field, - node_last_contact_failure Node_LastContactFailure_Field) ( + node_last_contact_failure Node_LastContactFailure_Field, + node_contained Node_Contained_Field) ( node *Node, err error) { __now := obj.db.Hooks.Now().UTC() @@ -6381,13 +6717,14 @@ func (obj *sqlite3Impl) Create_Node(ctx context.Context, __updated_at_val := __now __last_contact_success_val := node_last_contact_success.value() __last_contact_failure_val := node_last_contact_failure.value() + __contained_val := node_contained.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, protocol, type, email, wallet, free_bandwidth, free_disk, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at, last_contact_success, last_contact_failure ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes ( id, address, protocol, type, email, wallet, free_bandwidth, free_disk, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, audit_success_ratio, uptime_success_count, total_uptime_count, uptime_ratio, created_at, updated_at, last_contact_success, last_contact_failure, contained ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
)") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) - obj.logStmt(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val) + obj.logStmt(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val) - __res, err := obj.driver.Exec(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val) + __res, err := obj.driver.Exec(__stmt, __id_val, __address_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __audit_success_ratio_val, __uptime_success_count_val, __total_uptime_count_val, __uptime_ratio_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val) if err != nil { return nil, obj.makeErr(err) } @@ -6761,6 +7098,27 @@ func (obj *sqlite3Impl) Create_ResetPasswordToken(ctx context.Context, } +func (obj *sqlite3Impl) Get_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + pending_audits *PendingAudits, err error) { + + var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count FROM pending_audits WHERE pending_audits.node_id = ?") + + var __values []interface{} + __values = append(__values, pending_audits_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) 
+ + pending_audits = &PendingAudits{} + err = obj.driver.QueryRow(__stmt, __values...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount) + if err != nil { + return nil, obj.makeErr(err) + } + return pending_audits, nil + +} + func (obj *sqlite3Impl) Get_Irreparabledb_By_Segmentpath(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) ( irreparabledb *Irreparabledb, err error) { @@ -6899,7 +7257,7 @@ func (obj *sqlite3Impl) Get_Node_By_Id(ctx context.Context, node_id Node_Id_Field) ( node *Node, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained FROM nodes WHERE nodes.id = ?") var __values []interface{} __values = append(__values, node_id.value()) @@ -6908,7 +7266,7 @@ func (obj *sqlite3Impl) Get_Node_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
node = &Node{} - err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = obj.driver.QueryRow(__stmt, __values...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err != nil { return nil, obj.makeErr(err) } @@ -6953,7 +7311,7 @@ func (obj *sqlite3Impl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx con limit int, offset int64) ( rows []*Node, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, node_id_greater_or_equal.value()) @@ -6971,7 +7329,7 @@ func (obj *sqlite3Impl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx con for __rows.Next() { node := &Node{} - err = __rows.Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = __rows.Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err != nil { return nil, obj.makeErr(err) } @@ -7773,6 +8131,56 @@ func (obj *sqlite3Impl) Get_ResetPasswordToken_By_OwnerId(ctx context.Context, } +func (obj *sqlite3Impl) Update_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + update PendingAudits_Update_Fields) ( + pending_audits *PendingAudits, err error) { + var __sets = &__sqlbundle_Hole{} + + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE pending_audits SET "), __sets, __sqlbundle_Literal(" WHERE pending_audits.node_id = ?")}} + + __sets_sql := __sqlbundle_Literals{Join: ", "} + var __values []interface{} + var __args []interface{} + + if update.ReverifyCount._set { + __values = append(__values, update.ReverifyCount.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("reverify_count = ?")) + } + + if len(__sets_sql.SQLs) == 0 { + return nil, emptyUpdate() + } + + __args = append(__args, pending_audits_node_id.value()) + + __values = append(__values, __args...) + __sets.SQL = __sets_sql + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + pending_audits = &PendingAudits{} + _, err = obj.driver.Exec(__stmt, __values...) + if err != nil { + return nil, obj.makeErr(err) + } + + var __embed_stmt_get = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count FROM pending_audits WHERE pending_audits.node_id = ?") + + var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) + obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
+ + err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, obj.makeErr(err) + } + return pending_audits, nil +} + func (obj *sqlite3Impl) Update_Irreparabledb_By_Segmentpath(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field, update Irreparabledb_Update_Fields) ( @@ -8010,6 +8418,11 @@ func (obj *sqlite3Impl) Update_Node_By_Id(ctx context.Context, __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_contact_failure = ?")) } + if update.Contained._set { + __values = append(__values, update.Contained.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("contained = ?")) + } + __now := obj.db.Hooks.Now().UTC() __values = append(__values, __now) @@ -8029,12 +8442,12 @@ func (obj *sqlite3Impl) Update_Node_By_Id(ctx context.Context, return nil, obj.makeErr(err) } - var __embed_stmt_get = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.id = ?") + var __embed_stmt_get = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained FROM nodes WHERE nodes.id = ?") var __stmt_get = __sqlbundle_Render(obj.dialect, __embed_stmt_get) obj.logStmt("(IMPLIED) "+__stmt_get, __args...) 
- err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = obj.driver.QueryRow(__stmt_get, __args...).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err == sql.ErrNoRows { return nil, nil } @@ -8310,6 +8723,32 @@ func (obj *sqlite3Impl) Update_RegistrationToken_By_Secret(ctx context.Context, return registration_token, nil } +func (obj *sqlite3Impl) Delete_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + deleted bool, err error) { + + var __embed_stmt = __sqlbundle_Literal("DELETE FROM pending_audits WHERE pending_audits.node_id = ?") + + var __values []interface{} + __values = append(__values, pending_audits_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + __res, err := obj.driver.Exec(__stmt, __values...) + if err != nil { + return false, obj.makeErr(err) + } + + __count, err := __res.RowsAffected() + if err != nil { + return false, obj.makeErr(err) + } + + return __count > 0, nil + +} + func (obj *sqlite3Impl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context, irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) ( deleted bool, err error) { @@ -8623,6 +9062,24 @@ func (obj *sqlite3Impl) Delete_ResetPasswordToken_By_Secret(ctx context.Context, } +func (obj *sqlite3Impl) getLastPendingAudits(ctx context.Context, + pk int64) ( + pending_audits *PendingAudits, err error) { + + var __embed_stmt = __sqlbundle_Literal("SELECT pending_audits.node_id, pending_audits.piece_id, pending_audits.stripe_index, pending_audits.share_size, pending_audits.expected_share_hash, pending_audits.reverify_count FROM pending_audits WHERE _rowid_ = ?") + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, pk) + + pending_audits = &PendingAudits{} + err = obj.driver.QueryRow(__stmt, pk).Scan(&pending_audits.NodeId, &pending_audits.PieceId, &pending_audits.StripeIndex, &pending_audits.ShareSize, &pending_audits.ExpectedShareHash, &pending_audits.ReverifyCount) + if err != nil { + return nil, obj.makeErr(err) + } + return pending_audits, nil + +} + func (obj *sqlite3Impl) getLastIrreparabledb(ctx context.Context, pk int64) ( irreparabledb *Irreparabledb, err error) { @@ -8681,13 +9138,13 @@ func (obj *sqlite3Impl) getLastNode(ctx context.Context, pk int64) ( node *Node, err error) { - var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, 
nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE _rowid_ = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.audit_success_ratio, nodes.uptime_success_count, nodes.total_uptime_count, nodes.uptime_ratio, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained FROM nodes WHERE _rowid_ = ?") var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, pk) node = &Node{} - err = obj.driver.QueryRow(__stmt, pk).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure) + err = obj.driver.QueryRow(__stmt, pk).Scan(&node.Id, &node.Address, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.AuditSuccessRatio, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.UptimeRatio, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained) if err != nil { return nil, obj.makeErr(err) } @@ -9024,6 +9481,16 @@ func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error) return 0, obj.makeErr(err) } + __count, err = __res.RowsAffected() + if err != nil { + return 0, obj.makeErr(err) + } + count += __count + __res, err = obj.driver.Exec("DELETE FROM pending_audits;") + if err != nil { + return 0, obj.makeErr(err) + } + __count, err = __res.RowsAffected() if err != nil { return 0, obj.makeErr(err) @@ -9411,13 +9878,30 @@ func (rx *Rx) Create_Node(ctx context.Context, node_total_uptime_count Node_TotalUptimeCount_Field, node_uptime_ratio Node_UptimeRatio_Field, node_last_contact_success Node_LastContactSuccess_Field, - node_last_contact_failure Node_LastContactFailure_Field) ( + node_last_contact_failure Node_LastContactFailure_Field, + node_contained Node_Contained_Field) ( node *Node, err error) { var tx *Tx if tx, err = rx.getTx(ctx); err != nil { return } - return tx.Create_Node(ctx, node_id, node_address, node_protocol, node_type, node_email, node_wallet, node_free_bandwidth, node_free_disk, node_major, node_minor, node_patch, node_hash, node_timestamp, node_release, node_latency_90, node_audit_success_count, node_total_audit_count, node_audit_success_ratio, node_uptime_success_count, node_total_uptime_count, node_uptime_ratio, node_last_contact_success, node_last_contact_failure) + return tx.Create_Node(ctx, node_id, node_address, node_protocol, node_type, node_email, node_wallet, node_free_bandwidth, node_free_disk, node_major, node_minor, node_patch, node_hash, node_timestamp, node_release, node_latency_90, node_audit_success_count, node_total_audit_count, 
node_audit_success_ratio, node_uptime_success_count, node_total_uptime_count, node_uptime_ratio, node_last_contact_success, node_last_contact_failure, node_contained) + +} + +func (rx *Rx) Create_PendingAudits(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + pending_audits_piece_id PendingAudits_PieceId_Field, + pending_audits_stripe_index PendingAudits_StripeIndex_Field, + pending_audits_share_size PendingAudits_ShareSize_Field, + pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field, + pending_audits_reverify_count PendingAudits_ReverifyCount_Field) ( + pending_audits *PendingAudits, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.Create_PendingAudits(ctx, pending_audits_node_id, pending_audits_piece_id, pending_audits_stripe_index, pending_audits_share_size, pending_audits_expected_share_hash, pending_audits_reverify_count) } @@ -9584,6 +10068,16 @@ func (rx *Rx) Delete_Node_By_Id(ctx context.Context, return tx.Delete_Node_By_Id(ctx, node_id) } +func (rx *Rx) Delete_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + deleted bool, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.Delete_PendingAudits_By_NodeId(ctx, pending_audits_node_id) +} + func (rx *Rx) Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context, project_member_member_id ProjectMember_MemberId_Field, project_member_project_id ProjectMember_ProjectId_Field) ( @@ -9771,6 +10265,16 @@ func (rx *Rx) Get_Node_By_Id(ctx context.Context, return tx.Get_Node_By_Id(ctx, node_id) } +func (rx *Rx) Get_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + pending_audits *PendingAudits, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.Get_PendingAudits_By_NodeId(ctx, pending_audits_node_id) +} + func (rx *Rx) Get_Project_By_Id(ctx context.Context, project_id Project_Id_Field) ( project *Project, err error) { @@ -9964,6 +10468,17 @@ func (rx *Rx) Update_Node_By_Id(ctx context.Context, return tx.Update_Node_By_Id(ctx, node_id, update) } +func (rx *Rx) Update_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + update PendingAudits_Update_Fields) ( + pending_audits *PendingAudits, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.Update_PendingAudits_By_NodeId(ctx, pending_audits_node_id, update) +} + func (rx *Rx) Update_Project_By_Id(ctx context.Context, project_id Project_Id_Field, update Project_Update_Fields) ( @@ -10124,9 +10639,19 @@ type Methods interface { node_total_uptime_count Node_TotalUptimeCount_Field, node_uptime_ratio Node_UptimeRatio_Field, node_last_contact_success Node_LastContactSuccess_Field, - node_last_contact_failure Node_LastContactFailure_Field) ( + node_last_contact_failure Node_LastContactFailure_Field, + node_contained Node_Contained_Field) ( node *Node, err error) + Create_PendingAudits(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + pending_audits_piece_id PendingAudits_PieceId_Field, + pending_audits_stripe_index PendingAudits_StripeIndex_Field, + pending_audits_share_size PendingAudits_ShareSize_Field, + pending_audits_expected_share_hash PendingAudits_ExpectedShareHash_Field, + pending_audits_reverify_count PendingAudits_ReverifyCount_Field) ( + pending_audits *PendingAudits, err error) + 
Create_Project(ctx context.Context, project_id Project_Id_Field, project_name Project_Name_Field, @@ -10198,6 +10723,10 @@ type Methods interface { node_id Node_Id_Field) ( deleted bool, err error) + Delete_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + deleted bool, err error) + Delete_ProjectMember_By_MemberId_And_ProjectId(ctx context.Context, project_member_member_id ProjectMember_MemberId_Field, project_member_project_id ProjectMember_ProjectId_Field) ( @@ -10276,6 +10805,10 @@ type Methods interface { node_id Node_Id_Field) ( node *Node, err error) + Get_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field) ( + pending_audits *PendingAudits, err error) + Get_Project_By_Id(ctx context.Context, project_id Project_Id_Field) ( project *Project, err error) @@ -10361,6 +10894,11 @@ type Methods interface { update Node_Update_Fields) ( node *Node, err error) + Update_PendingAudits_By_NodeId(ctx context.Context, + pending_audits_node_id PendingAudits_NodeId_Field, + update PendingAudits_Update_Fields) ( + pending_audits *PendingAudits, err error) + Update_Project_By_Id(ctx context.Context, project_id Project_Id_Field, update Project_Update_Fields) ( diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql b/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql index e3861d384..263f3d590 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.postgres.sql @@ -111,8 +111,18 @@ CREATE TABLE nodes ( updated_at timestamp with time zone NOT NULL, last_contact_success timestamp with time zone NOT NULL, last_contact_failure timestamp with time zone NOT NULL, + contained boolean NOT NULL, PRIMARY KEY ( id ) ); +CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); CREATE TABLE projects ( id bytea NOT NULL, name text NOT NULL, diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql b/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql index de7e9c204..d5dfe0a5b 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.sqlite3.sql @@ -111,8 +111,18 @@ CREATE TABLE nodes ( updated_at TIMESTAMP NOT NULL, last_contact_success TIMESTAMP NOT NULL, last_contact_failure TIMESTAMP NOT NULL, + contained INTEGER NOT NULL, PRIMARY KEY ( id ) ); +CREATE TABLE pending_audits ( + node_id BLOB NOT NULL, + piece_id BLOB NOT NULL, + stripe_index INTEGER NOT NULL, + share_size INTEGER NOT NULL, + expected_share_hash BLOB NOT NULL, + reverify_count INTEGER NOT NULL, + PRIMARY KEY ( node_id ) +); CREATE TABLE projects ( id BLOB NOT NULL, name TEXT NOT NULL, diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index ffeea6eb7..9ff22e108 100644 --- a/satellite/satellitedb/migrate.go +++ b/satellite/satellitedb/migrate.go @@ -644,6 +644,25 @@ func (db *DB) PostgresMigration() *migrate.Migration { );`, }, }, + { + Description: "Adds pending_audits table, adds 'contained' column to nodes table", + Version: 20, + Action: migrate.SQL{ + `ALTER TABLE nodes ADD contained boolean; + UPDATE nodes SET contained = false; + ALTER TABLE nodes ALTER COLUMN contained SET NOT NULL;`, + + `CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index 
bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) + );`, + }, + }, }, } } diff --git a/satellite/satellitedb/overlaycache.go b/satellite/satellitedb/overlaycache.go index 0bfe68552..87a0b1257 100644 --- a/satellite/satellitedb/overlaycache.go +++ b/satellite/satellitedb/overlaycache.go @@ -270,6 +270,7 @@ func (cache *overlaycache) UpdateAddress(ctx context.Context, info *pb.Node) (er dbx.Node_UptimeRatio(1), dbx.Node_LastContactSuccess(time.Now()), dbx.Node_LastContactFailure(time.Time{}), + dbx.Node_Contained(false), ) if err != nil { return Error.Wrap(errs.Combine(err, tx.Rollback())) @@ -547,6 +548,7 @@ func convertDBNode(info *dbx.Node) (*overlay.NodeDossier, error) { Timestamp: pbts, Release: info.Release, }, + Contained: info.Contained, } return node, nil diff --git a/satellite/satellitedb/testdata/postgres.v20.sql b/satellite/satellitedb/testdata/postgres.v20.sql new file mode 100644 index 000000000..a8d6d6266 --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v20.sql @@ -0,0 +1,254 @@ +-- Copied from the corresponding version of dbx generated schema +CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE accounting_rollups ( + id bigserial NOT NULL, + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp NOT NULL, + inline bigint NOT NULL, + remote bigint NOT NULL, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE bucket_usages ( + id bytea NOT NULL, + bucket_id bytea NOT NULL, + rollup_end_time timestamp with time zone NOT NULL, + remote_stored_data bigint NOT NULL, + inline_stored_data bigint NOT NULL, + remote_segments integer NOT NULL, + inline_segments integer NOT NULL, + objects integer NOT NULL, + metadata_size bigint NOT NULL, + repair_egress bigint NOT NULL, + get_egress bigint NOT NULL, + audit_egress bigint NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE bwagreements ( + serialnum text NOT NULL, + storage_node_id bytea NOT NULL, + uplink_id bytea NOT NULL, + action bigint NOT NULL, + total bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( serialnum ) +); +CREATE TABLE certRecords ( + publickey bytea NOT NULL, + id bytea NOT NULL, + update_at timestamp with time zone NOT 
NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE injuredsegments ( + path text NOT NULL, + data bytea NOT NULL, + attempted timestamp, + PRIMARY KEY ( path ) +); +CREATE TABLE irreparabledbs ( + segmentpath bytea NOT NULL, + segmentdetail bytea NOT NULL, + pieces_lost_count bigint NOT NULL, + seg_damaged_unix_sec bigint NOT NULL, + repair_attempt_count bigint NOT NULL, + PRIMARY KEY ( segmentpath ) +); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL, + protocol integer NOT NULL, + type integer NOT NULL, + email text NOT NULL, + wallet text NOT NULL, + free_bandwidth bigint NOT NULL, + free_disk bigint NOT NULL, + major bigint NOT NULL, + minor bigint NOT NULL, + patch bigint NOT NULL, + hash text NOT NULL, + timestamp timestamp with time zone NOT NULL, + release boolean NOT NULL, + latency_90 bigint NOT NULL, + audit_success_count bigint NOT NULL, + total_audit_count bigint NOT NULL, + audit_success_ratio double precision NOT NULL, + uptime_success_count bigint NOT NULL, + total_uptime_count bigint NOT NULL, + uptime_ratio double precision NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + last_contact_success timestamp with time zone NOT NULL, + last_contact_failure timestamp with time zone NOT NULL, + contained boolean NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + name text NOT NULL, + description text NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE serial_numbers ( + id serial NOT NULL, + serial_number bytea NOT NULL, + bucket_id bytea NOT NULL, + expires_at timestamp NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_storage_tallies ( + id bigserial NOT NULL, + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + full_name text NOT NULL, + short_name text, + email text NOT NULL, + password_hash bytea NOT NULL, + status integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + key bytea NOT NULL, + name text NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( key ), + UNIQUE ( name, project_id ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE used_serials ( + serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE, + storage_node_id bytea NOT NULL, + PRIMARY KEY ( serial_number_id, storage_node_id ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + 
PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds ); +CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time ); +CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number ); +CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at ); +CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds ); + +--- + +INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 0, 5, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false); +INSERT INTO "nodes"("id", "address", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 1, 3, 3, 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false); +INSERT INTO "nodes"("id", "address", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "audit_success_ratio", "uptime_success_count", "total_uptime_count", "uptime_ratio", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 1, 0, 0, 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false); + + +INSERT INTO "projects"("id", "name", "description", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', '2019-02-14 08:28:24.254934+00'); +INSERT INTO "api_keys"("id", "project_id", "key", "name", 
"created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\000]\\326N \\343\\270L\\327\\027\\337\\242\\240\\322mOl\\0318\\251.P I'::bytea, 'key 2', '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@ukr.net', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00'); +INSERT INTO "projects"("id", "name", "description", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', '2019-02-14 08:28:24.636949+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "bwagreements"("serialnum", "storage_node_id", "action", "total", "created_at", "expires_at", "uplink_id") VALUES ('8fc0ceaa-984c-4d52-bcf4-b5429e1e35e812FpiifDbcJkePa12jxjDEutKrfLmwzT7sz2jfVwpYqgtM8B74c', E'\\245Z[/\\333\\022\\011\\001\\036\\003\\204\\005\\032.\\206\\333E\\261\\342\\227=y,}aRaH6\\240\\370\\000'::bytea, 1, 666, '2019-02-14 15:09:54.420181+00', '2019-02-14 16:09:54+00', E'\\253Z+\\374eFm\\245$\\036\\206\\335\\247\\263\\350x\\\\\\304+\\364\\343\\364+\\276fIJQ\\361\\014\\232\\000'::bytea); +INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10); + +INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100'); +INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a'); +INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a'); +INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "certrecords" VALUES (E'0Y0\\023\\006\\007*\\206H\\316=\\002\\001\\006\\010*\\206H\\316=\\003\\001\\007\\003B\\000\\004\\360\\267\\227\\377\\253u\\222\\337Y\\324C:GQ\\010\\277v\\010\\315D\\271\\333\\337.\\203\\023=C\\343\\014T%6\\027\\362?\\214\\326\\017U\\334\\000\\260\\224\\260J\\221\\304\\331F\\304\\221\\236zF,\\325\\326l\\215\\306\\365\\200\\022', E'L\\301|\\200\\247}F|1\\320\\232\\037n\\335\\241\\206\\244\\242\\207\\204.\\253\\357\\326\\352\\033Dt\\202`\\022\\325', '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "bucket_usages" ("id", "bucket_id", "rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES 
(E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00'); +INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0); + +INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +-- NEW DATA -- + +INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1);
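--

Reviewer sketch (not part of the patch): the generated accessors are the whole
API surface this change adds, so follow-up containment logic would drive them
roughly as below. The satellitedb placement, the containment type, and its db
field are assumptions made for illustration; only the PendingAudits struct, the
PendingAudits_* field constructors, and the four accessor signatures come from
the patch.

package satellitedb

import (
	"context"

	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// containment is a hypothetical wrapper over the generated pending_audits
// accessors; the type name and the db field are assumptions for this sketch.
type containment struct {
	db *dbx.DB
}

// Contain places a node in containment mode, or bumps reverify_count if the
// node already has a pending audit (node_id is the primary key, so each node
// carries at most one).
func (c *containment) Contain(ctx context.Context, nodeID, pieceID []byte,
	stripeIndex, shareSize int64, expectedShareHash []byte) error {
	existing, err := c.db.Get_PendingAudits_By_NodeId(ctx, dbx.PendingAudits_NodeId(nodeID))
	if err != nil {
		// Treat a failed lookup as "not contained yet" to keep the sketch
		// short; real code would distinguish sql.ErrNoRows from other errors.
		_, err = c.db.Create_PendingAudits(ctx,
			dbx.PendingAudits_NodeId(nodeID),
			dbx.PendingAudits_PieceId(pieceID),
			dbx.PendingAudits_StripeIndex(stripeIndex),
			dbx.PendingAudits_ShareSize(shareSize),
			dbx.PendingAudits_ExpectedShareHash(expectedShareHash),
			dbx.PendingAudits_ReverifyCount(0),
		)
		return err
	}
	// reverify_count is the only column marked updatable in the dbx model,
	// so an update can only ever bump the retry counter.
	_, err = c.db.Update_PendingAudits_By_NodeId(ctx,
		dbx.PendingAudits_NodeId(nodeID),
		dbx.PendingAudits_Update_Fields{
			ReverifyCount: dbx.PendingAudits_ReverifyCount(existing.ReverifyCount + 1),
		},
	)
	return err
}

// Release removes the node from containment once it answers (or permanently
// fails) the reverification audit.
func (c *containment) Release(ctx context.Context, nodeID []byte) error {
	_, err := c.db.Delete_PendingAudits_By_NodeId(ctx, dbx.PendingAudits_NodeId(nodeID))
	return err
}

Two details from the patch itself worth noting alongside the sketch: the
version-20 migration adds nodes.contained in three steps (ADD as nullable,
backfill false, then SET NOT NULL) so the ALTER succeeds on tables that already
hold rows, and UpdateAddress inserts dbx.Node_Contained(false) so new nodes
always start uncontained.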