satellite/satellitedb: fixes to row handling

Change-Id: I48fae692bcca152143a12f333296c42471538850
This commit is contained in:
Egon Elbre 2020-01-16 16:27:24 +02:00
parent 0c365d157f
commit 7d79aab14e
13 changed files with 93 additions and 92 deletions

View File

@ -86,13 +86,10 @@ func (keys *apikeys) GetPagedByProjectID(ctx context.Context, projectID uuid.UUI
page.Limit,
page.Offset)
defer func() {
err = errs.Combine(err, rows.Close())
}()
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
var apiKeys []console.APIKeyInfo
for rows.Next() {

View File

@ -155,8 +155,8 @@ func (keys *attributionDB) QueryAttribution(ctx context.Context, partnerID uuid.
if err != nil {
return nil, Error.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
results := []*attribution.CSVRow{}
for rows.Next() {
r := &attribution.CSVRow{}

View File

@ -247,10 +247,7 @@ func (db *coinPaymentsTransactions) ListUnapplied(ctx context.Context, offset in
if err != nil {
return stripecoinpayments.TransactionsPage{}, err
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
var page stripecoinpayments.TransactionsPage

View File

@ -182,10 +182,7 @@ func (db *gracefulexitDB) GetIncomplete(ctx context.Context, nodeID storj.NodeID
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
transferQueueItemRows, err := scanRows(rows)
if err != nil {
@ -208,10 +205,7 @@ func (db *gracefulexitDB) GetIncompleteNotFailed(ctx context.Context, nodeID sto
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
transferQueueItemRows, err := scanRows(rows)
if err != nil {
@ -235,10 +229,7 @@ func (db *gracefulexitDB) GetIncompleteFailed(ctx context.Context, nodeID storj.
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
transferQueueItemRows, err := scanRows(rows)
if err != nil {

View File

@ -11,12 +11,14 @@ import (
"strconv"
"strings"
"testing"
"time"
"github.com/lib/pq"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap/zaptest"
"golang.org/x/sync/errgroup"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/dbschema"
@ -146,7 +148,7 @@ type satelliteDB interface {
}
func pgMigrateTest(t *testing.T, connStr string) {
ctx := testcontext.New(t)
ctx := testcontext.NewWithTimeout(t, 5*time.Minute)
defer ctx.Cleanup()
log := zaptest.NewLogger(t)

View File

@ -55,6 +55,7 @@ func (db *offersDB) GetActiveOffersByType(ctx context.Context, offerType rewards
if err != nil {
return nil, rewards.ErrOfferNotExist.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
var (
awardCreditInCents int
@ -64,7 +65,6 @@ func (db *offersDB) GetActiveOffersByType(ctx context.Context, offerType rewards
redeemableCap sql.NullInt64
)
defer func() { err = errs.Combine(err, rows.Close()) }()
results := rewards.Offers{}
for rows.Next() {
o := rewards.Offer{}
@ -92,7 +92,7 @@ func (db *offersDB) GetActiveOffersByType(ctx context.Context, offerType rewards
if len(results) < 1 {
return results, rewards.ErrOfferNotExist.New("offerType: %d", offerType)
}
return results, nil
return results, rows.Err()
}
// Create inserts a new offer into the db

View File

@ -167,7 +167,7 @@ func (cache *overlaycache) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeI
}
nodeIPs = append(nodeIPs, ip)
}
return nodeIPs, nil
return nodeIPs, Error.Wrap(rows.Err())
}
func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj.NodeID, count int, safeQuery string, args ...interface{}) (_ []*pb.Node, err error) {
@ -189,18 +189,19 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
var rows *sql.Rows
rows, err = cache.db.Query(cache.db.Rebind(`SELECT id, type, address, last_net,
free_bandwidth, free_disk, total_audit_count, audit_success_count,
total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha,
audit_reputation_beta
FROM nodes
`+safeQuery+safeExcludeNodes+`
ORDER BY RANDOM()
LIMIT ?`), args...)
free_bandwidth, free_disk, total_audit_count, audit_success_count,
total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha,
audit_reputation_beta
FROM nodes
`+safeQuery+safeExcludeNodes+`
ORDER BY RANDOM()
LIMIT ?`), args...)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
var nodes []*pb.Node
for rows.Next() {
dbNode := &dbx.Node{}
@ -221,7 +222,7 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
nodes = append(nodes, &dossier.Node)
}
return nodes, rows.Err()
return nodes, Error.Wrap(rows.Err())
}
func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) {
@ -249,24 +250,24 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes
args = append(args, count)
rows, err := cache.db.Query(cache.db.Rebind(`
SELECT *
FROM (
SELECT DISTINCT ON (last_net) last_net, -- choose at max 1 node from this IP or network
id, type, address, free_bandwidth, free_disk, total_audit_count,
audit_success_count, total_uptime_count, uptime_success_count,
audit_reputation_alpha, audit_reputation_beta
FROM nodes
`+safeQuery+safeExcludeNodes+safeExcludeIPs+`
AND last_net <> '' -- don't try to IP-filter nodes with no known IP yet
ORDER BY last_net, RANDOM() -- equal chance of choosing any qualified node at this IP or network
) filteredcandidates
ORDER BY RANDOM() -- do the actual node selection from filtered pool
LIMIT ?`), args...)
SELECT *
FROM (
SELECT DISTINCT ON (last_net) last_net, -- choose at max 1 node from this IP or network
id, type, address, free_bandwidth, free_disk, total_audit_count,
audit_success_count, total_uptime_count, uptime_success_count,
audit_reputation_alpha, audit_reputation_beta
FROM nodes
`+safeQuery+safeExcludeNodes+safeExcludeIPs+`
AND last_net <> '' -- don't try to IP-filter nodes with no known IP yet
ORDER BY last_net, RANDOM() -- equal chance of choosing any qualified node at this IP or network
) filteredcandidates
ORDER BY RANDOM() -- do the actual node selection from filtered pool
LIMIT ?`), args...)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
var nodes []*pb.Node
for rows.Next() {
dbNode := &dbx.Node{}
@ -286,7 +287,7 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes
nodes = append(nodes, &dossier.Node)
}
return nodes, rows.Err()
return nodes, Error.Wrap(rows.Err())
}
// Get looks up the node by nodeID
@ -321,9 +322,7 @@ func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.N
rows, err = cache.db.Query(cache.db.Rebind(`
SELECT id FROM nodes
WHERE id = any($1::bytea[])
AND (
last_contact_success < $2
)
AND last_contact_success < $2
`), postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow),
)
if err != nil {
@ -339,7 +338,7 @@ func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.N
}
offlineNodes = append(offlineNodes, id)
}
return offlineNodes, nil
return offlineNodes, Error.Wrap(rows.Err())
}
// KnownUnreliableOrOffline filters a set of nodes to unreliable or offlines node, independent of new
@ -378,7 +377,7 @@ func (cache *overlaycache) KnownUnreliableOrOffline(ctx context.Context, criteri
badNodes = append(badNodes, id)
}
}
return badNodes, nil
return badNodes, Error.Wrap(rows.Err())
}
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
@ -414,7 +413,7 @@ func (cache *overlaycache) KnownReliable(ctx context.Context, onlineWindow time.
}
nodes = append(nodes, node)
}
return nodes, nil
return nodes, Error.Wrap(rows.Err())
}
// Reliable returns all reliable nodes.
@ -423,8 +422,8 @@ func (cache *overlaycache) Reliable(ctx context.Context, criteria *overlay.NodeC
rows, err := cache.db.Query(cache.db.Rebind(`
SELECT id FROM nodes
WHERE disqualified IS NULL
AND last_contact_success > ?`),
time.Now().Add(-criteria.OnlineWindow))
AND last_contact_success > ?
`), time.Now().Add(-criteria.OnlineWindow))
if err != nil {
return nil, err
}
@ -440,7 +439,7 @@ func (cache *overlaycache) Reliable(ctx context.Context, criteria *overlay.NodeC
}
nodes = append(nodes, id)
}
return nodes, nil
return nodes, Error.Wrap(rows.Err())
}
// Paginate will run through
@ -898,14 +897,11 @@ func (cache *overlaycache) GetExitingNodes(ctx context.Context) (exitingNodes []
WHERE exit_initiated_at IS NOT NULL
AND exit_finished_at IS NULL
AND disqualified is NULL
`),
)
`))
if err != nil {
return nil, err
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var exitingNodeStatus overlay.ExitStatus
@ -915,26 +911,32 @@ func (cache *overlaycache) GetExitingNodes(ctx context.Context) (exitingNodes []
}
exitingNodes = append(exitingNodes, &exitingNodeStatus)
}
return exitingNodes, nil
return exitingNodes, Error.Wrap(rows.Err())
}
// GetExitStatus returns a node's graceful exit status.
func (cache *overlaycache) GetExitStatus(ctx context.Context, nodeID storj.NodeID) (_ *overlay.ExitStatus, err error) {
defer mon.Task()(&ctx)(&err)
rows, err := cache.db.Query(cache.db.Rebind("select id, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success from nodes where id = ?"), nodeID)
rows, err := cache.db.Query(cache.db.Rebind(`
SELECT id, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success
FROM nodes
WHERE id = ?
`), nodeID)
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
exitStatus := &overlay.ExitStatus{}
if rows.Next() {
err = rows.Scan(&exitStatus.NodeID, &exitStatus.ExitInitiatedAt, &exitStatus.ExitLoopCompletedAt, &exitStatus.ExitFinishedAt, &exitStatus.ExitSuccess)
if err != nil {
return nil, err
}
}
return exitStatus, Error.Wrap(err)
return exitStatus, Error.Wrap(rows.Err())
}
// GetGracefulExitCompletedByTimeFrame returns nodes who have completed graceful exit within a time window (time window is around graceful exit completion).
@ -947,8 +949,7 @@ func (cache *overlaycache) GetGracefulExitCompletedByTimeFrame(ctx context.Conte
AND exit_finished_at IS NOT NULL
AND exit_finished_at >= ?
AND exit_finished_at < ?
`), begin, end,
)
`), begin, end)
if err != nil {
return nil, err
}
@ -964,7 +965,7 @@ func (cache *overlaycache) GetGracefulExitCompletedByTimeFrame(ctx context.Conte
}
exitedNodes = append(exitedNodes, id)
}
return exitedNodes, rows.Err()
return exitedNodes, Error.Wrap(rows.Err())
}
// GetGracefulExitIncompleteByTimeFrame returns nodes who have initiated, but not completed graceful exit within a time window (time window is around graceful exit initiation).
@ -977,8 +978,7 @@ func (cache *overlaycache) GetGracefulExitIncompleteByTimeFrame(ctx context.Cont
AND exit_finished_at IS NULL
AND exit_initiated_at >= ?
AND exit_initiated_at < ?
`), begin, end,
)
`), begin, end)
if err != nil {
return nil, err
}
@ -995,7 +995,7 @@ func (cache *overlaycache) GetGracefulExitIncompleteByTimeFrame(ctx context.Cont
}
exitingNodes = append(exitingNodes, id)
}
return exitingNodes, rows.Err()
return exitingNodes, Error.Wrap(rows.Err())
}
// UpdateExitStatus is used to update a node's graceful exit status.

View File

@ -84,13 +84,14 @@ func (idents *peerIdentities) BatchGet(ctx context.Context, nodeIDs storj.NodeID
// TODO: optimize using arrays like overlay
rows, err := idents.db.Query(idents.db.Rebind(`
SELECT chain FROM peer_identities WHERE node_id IN (?`+strings.Repeat(", ?", len(nodeIDs)-1)+`)`), args...)
SELECT chain
FROM peer_identities
WHERE node_id IN (?`+strings.Repeat(", ?", len(nodeIDs)-1)+`)
`), args...)
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var peerChain []byte
@ -104,5 +105,5 @@ func (idents *peerIdentities) BatchGet(ctx context.Context, nodeIDs storj.NodeID
}
peerIdents = append(peerIdents, ident)
}
return peerIdents, nil
return peerIdents, Error.Wrap(rows.Err())
}

View File

@ -210,6 +210,7 @@ func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid
for _, bucket := range bucketNames {
storageTallies := make([]*accounting.BucketStorageTally, 0)
storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
@ -221,13 +222,13 @@ func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid
err = storageTalliesRows.Scan(&tally.IntervalStart, &tally.InlineBytes, &tally.RemoteBytes, &tally.ObjectCount)
if err != nil {
return nil, err
return nil, errs.Combine(err, storageTalliesRows.Close())
}
tally.BucketName = bucket
storageTallies = append(storageTallies, &tally)
}
err = storageTalliesRows.Close()
err = errs.Combine(storageTalliesRows.Err(), storageTalliesRows.Close())
if err != nil {
return nil, err
}
@ -340,6 +341,9 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
continue
}
}
if err := rollupsRows.Err(); err != nil {
return nil, err
}
bucketStorageTallies, err := storageQuery(ctx,
dbx.BucketStorageTally_ProjectId(projectID[:]),
@ -504,8 +508,8 @@ func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, bucketRows.Close()) }()
for bucketRows.Next() {
var bucket string
err = bucketRows.Scan(&bucket)
@ -515,6 +519,9 @@ func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid
buckets = append(buckets, bucket)
}
if err := bucketRows.Err(); err != nil {
return nil, err
}
rollupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
@ -558,6 +565,9 @@ func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid
totalEgress += settled + inline
}
}
if err := rollupsRows.Err(); err != nil {
return nil, err
}
bucketUsage.Egress = memory.Size(totalEgress).GB()
@ -615,7 +625,7 @@ func (db *ProjectAccounting) getBuckets(ctx context.Context, projectID uuid.UUID
buckets = append(buckets, bucket)
}
return buckets, nil
return buckets, bucketRows.Err()
}
// timeTruncateDown truncates down to the hour before to be in sync with orders endpoint

View File

@ -140,6 +140,9 @@ func (pm *projectMembers) GetPagedByProjectID(ctx context.Context, projectID uui
projectMembers = append(projectMembers, pm)
}
if err := rows.Err(); err != nil {
return nil, err
}
page.ProjectMembers = projectMembers
page.Order = cursor.Order

View File

@ -73,6 +73,8 @@ func (r *repairQueue) SelectN(ctx context.Context, limit int) (segs []pb.Injured
if err != nil {
return nil, Error.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var seg pb.InjuredSegment
err = rows.Scan(&seg)
@ -81,6 +83,7 @@ func (r *repairQueue) SelectN(ctx context.Context, limit int) (segs []pb.Injured
}
segs = append(segs, seg)
}
return segs, Error.Wrap(rows.Err())
}

View File

@ -175,11 +175,13 @@ func (db *StoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start tim
) r
LEFT JOIN nodes n ON n.id = r.node_id
ORDER BY n.id`
rows, err := db.db.DB.QueryContext(ctx, db.db.Rebind(sqlStmt), start.UTC(), end.UTC())
if err != nil {
return nil, Error.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
csv := []*accounting.CSVRow{}
for rows.Next() {
var nodeID []byte
@ -202,7 +204,7 @@ func (db *StoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start tim
r.Disqualified = disqualified
csv = append(csv, r)
}
return csv, nil
return csv, rows.Err()
}
// QueryStorageNodeUsage returns slice of StorageNodeUsage for given period
@ -242,14 +244,10 @@ func (db *StoragenodeAccounting) QueryStorageNodeUsage(ctx context.Context, node
rows, err := db.db.QueryContext(ctx, db.db.Rebind(query),
nodeID, start, end, accounting.LastRollup,
)
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = errs.Combine(err, rows.Close())
}()
defer func() { err = errs.Combine(err, rows.Close()) }()
var nodeStorageUsages []accounting.StorageNodeUsage
for rows.Next() {
@ -268,7 +266,7 @@ func (db *StoragenodeAccounting) QueryStorageNodeUsage(ctx context.Context, node
})
}
return nodeStorageUsages, nil
return nodeStorageUsages, rows.Err()
}
// DeleteTalliesBefore deletes all raw tallies prior to some time

View File

@ -40,7 +40,6 @@ func (c *usercredits) GetCreditUsage(ctx context.Context, userID uuid.UUID, expi
usage := console.UserCreditUsage{}
for usageRows.Next() {
var (
usedCreditInCents sql.NullInt64
availableCreditInCents sql.NullInt64
@ -56,7 +55,7 @@ func (c *usercredits) GetCreditUsage(ctx context.Context, userID uuid.UUID, expi
usage.AvailableCredits = usage.AvailableCredits.Add(currency.Cents(int(availableCreditInCents.Int64)))
}
return &usage, nil
return &usage, usageRows.Err()
}
// Create inserts a new record of user credit