all: fix defers in loop

`defer` should not be called inside a loop: deferred calls only run when the enclosing function returns, so resources accumulate across iterations instead of being released at the end of each one.

Change-Id: Ifa5a25a56402814b974bcdfb0c2fce56df8e7e59
This commit is contained in:
Egon Elbre 2020-11-02 14:21:55 +02:00
parent fd8e697ab2
commit 7183dca6cb
11 changed files with 578 additions and 536 deletions

View File

@ -73,13 +73,20 @@ func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, er
// discoverTables populates schema with the table structure of every entry in
// tableDefinitions, delegating the per-table work to discoverTable.
// It stops and returns the first error encountered.
func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, tableDefinitions []*definition) error {
	for _, definition := range tableDefinitions {
		if err := discoverTable(ctx, db, schema, definition); err != nil {
			return err
		}
	}
	// All error paths return inside the loop, so there is nothing to wrap here.
	// (The previous `return errs.Wrap(err)` always wrapped a nil named error.)
	return nil
}
func discoverTable(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, definition *definition) (err error) {
table := schema.EnsureTable(definition.name)
tableRows, err := db.QueryContext(ctx, `PRAGMA table_info(`+definition.name+`)`)
if err != nil {
return errs.Wrap(err)
}
defer func() { err = errs.Combine(err, tableRows.Close()) }()
for tableRows.Next() {
var defaultValue sql.NullString
@ -88,7 +95,7 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
var notNull bool
err := tableRows.Scan(&index, &name, &columnType, &notNull, &defaultValue, &pk)
if err != nil {
return errs.Wrap(err)
return errs.Wrap(errs.Combine(tableRows.Err(), tableRows.Close(), err))
}
column := &dbschema.Column{
@ -103,7 +110,10 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
}
table.PrimaryKey = append(table.PrimaryKey, name)
}
}
err = errs.Combine(tableRows.Err(), tableRows.Close())
if err != nil {
return errs.Wrap(err)
}
matches := rxUnique.FindAllStringSubmatch(definition.sql, -1)
@ -121,14 +131,13 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
if err != nil {
return errs.Wrap(err)
}
defer func() { err = errs.Combine(err, keysRows.Close()) }()
for keysRows.Next() {
var id, sec int
var tableName, from, to, onUpdate, onDelete, match string
err := keysRows.Scan(&id, &sec, &tableName, &from, &to, &onUpdate, &onDelete, &match)
if err != nil {
return errs.Wrap(err)
return errs.Wrap(errs.Combine(keysRows.Err(), keysRows.Close(), err))
}
column, found := table.FindColumn(from)
@ -147,8 +156,12 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
}
}
}
}
err = errs.Combine(keysRows.Err(), keysRows.Close())
if err != nil {
return errs.Wrap(err)
}
return nil
}
func discoverIndexes(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, indexDefinitions []*definition) (err error) {

View File

@ -34,6 +34,7 @@ func TestBasic(t *testing.T) {
for _, sat := range planet.Satellites {
for _, sn := range planet.StorageNodes {
func() {
node := sn.Contact.Service.Local()
conn, err := sn.Dialer.DialNodeURL(ctx, sat.NodeURL())
@ -46,6 +47,7 @@ func TestBasic(t *testing.T) {
Operator: &node.Operator,
})
require.NoError(t, err)
}()
}
}
// wait a bit to see whether some failures occur

View File

@ -39,7 +39,6 @@ func TestAuth_Register(t *testing.T) {
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
for i, test := range []struct {
Partner string
ValidPartner bool
@ -50,6 +49,7 @@ func TestAuth_Register(t *testing.T) {
{Partner: "Raiden nEtwork", ValidPartner: true},
{Partner: "invalid-name", ValidPartner: false},
} {
func() {
registerData := struct {
FullName string `json:"fullName"`
ShortName string `json:"shortName"`
@ -96,6 +96,7 @@ func TestAuth_Register(t *testing.T) {
} else {
require.Equal(t, uuid.UUID{}, user.PartnerID)
}
}()
}
})
}

View File

@ -215,6 +215,7 @@ func TestInvalidAPIKey(t *testing.T) {
require.NoError(t, err)
for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
func() {
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
require.NoError(t, err)
defer ctx.Check(client.Close)
@ -294,6 +295,7 @@ func TestInvalidAPIKey(t *testing.T) {
err = client.CommitSegment(ctx, metainfo.CommitSegmentParams{SegmentID: segmentID})
assertInvalidArgument(t, err, false)
}()
}
})
}

View File

@ -76,6 +76,7 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
}
for _, tt := range testCases {
func() {
// create serial number to use in test. must be unique for each run.
serialNumber1 := testrand.SerialNumber()
err = ordersDB.CreateSerialInfo(ctx, serialNumber1, []byte(bucketID), now.AddDate(1, 0, 10))
@ -176,6 +177,7 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, tt.orderCreation)
require.NoError(t, err)
require.EqualValues(t, tt.settledAmt, newBbw)
}()
}
})
}
@ -223,6 +225,7 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
}
for _, tt := range testCases {
func() {
// create signed orderlimit or order to test with
limit := &pb.OrderLimit{
SerialNumber: serialNumber,
@ -289,6 +292,7 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
require.NoError(t, err)
require.Equal(t, dataAmount, newBbw)
}()
}
})
}

View File

@ -358,6 +358,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
var bucketUsageRollups []accounting.BucketUsageRollup
for _, bucket := range buckets {
err := func() error {
bucketRollup := accounting.BucketUsageRollup{
ProjectID: projectID,
BucketName: []byte(bucket),
@ -368,7 +369,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
// get bucket_bandwidth_rollups
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
return err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
@ -379,7 +380,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
return err
}
switch action {
@ -394,7 +395,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
}
}
if err := rollupsRows.Err(); err != nil {
return nil, err
return err
}
bucketStorageTallies, err := storageQuery(ctx,
@ -404,7 +405,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
dbx.BucketStorageTally_IntervalStart(before))
if err != nil {
return nil, err
return err
}
// fill metadata, objects and stored data
@ -424,6 +425,11 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
}
bucketUsageRollups = append(bucketUsageRollups, bucketRollup)
return nil
}()
if err != nil {
return nil, err
}
}
return bucketUsageRollups, nil

View File

@ -163,6 +163,7 @@ func testConstraints(t *testing.T, ctx *testcontext.Context, store storage.KeyVa
{storage.Value("old-value"), nil},
{storage.Value("old-value"), storage.Value("new-value")},
} {
func() {
errTag := fmt.Sprintf("%d. %+v", i, tt)
key := storage.Key("test-key")
val := storage.Value("test-value")
@ -173,6 +174,7 @@ func testConstraints(t *testing.T, ctx *testcontext.Context, store storage.KeyVa
err = store.CompareAndSwap(ctx, key, tt.old, tt.new)
assert.True(t, storage.ErrValueChanged.Has(err), "%s: unexpected error: %+v", errTag, err)
}()
}
})

View File

@ -398,7 +398,6 @@ func (service *Service) sendOrdersFromFileStore(ctx context.Context, now time.Ti
var group errgroup.Group
attemptedSatellites := 0
ctx, cancel := context.WithTimeout(ctx, service.config.SenderTimeout)
defer cancel()
for satelliteID, unsentInfo := range ordersBySatellite {
satelliteID, unsentInfo := satelliteID, unsentInfo
@ -430,6 +429,7 @@ func (service *Service) sendOrdersFromFileStore(ctx context.Context, now time.Ti
}
_ = group.Wait() // doesn't return errors
cancel()
// if all satellites that orders need to be sent to are offline, exit and try again later.
if attemptedSatellites == 0 {

View File

@ -50,6 +50,7 @@ func TestUploadAndPartialDownload(t *testing.T) {
{1513, 1584},
{13581, 4783},
} {
func() {
if piecestore.DefaultConfig.InitialStep < tt.size {
t.Fatal("test expects initial step to be larger than size to download")
}
@ -67,6 +68,7 @@ func TestUploadAndPartialDownload(t *testing.T) {
assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data)
require.NoError(t, download.Close())
}()
}
var totalBandwidthUsage bandwidth.Usage

View File

@ -226,6 +226,7 @@ func TestOrderLimitGetValidation(t *testing.T) {
err: "expected get or get repair or audit action got PUT",
},
} {
func() {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
@ -265,6 +266,7 @@ func TestOrderLimitGetValidation(t *testing.T) {
} else {
require.NoError(t, err)
}
}()
}
})
}

View File

@ -349,6 +349,14 @@ func (db *DB) MigrateToLatest(ctx context.Context) error {
// Preflight conducts a pre-flight check to ensure correct schemas and minimal read+write functionality of the database tables.
func (db *DB) Preflight(ctx context.Context) (err error) {
for dbName, dbContainer := range db.SQLDBs {
if err := db.preflight(ctx, dbName, dbContainer); err != nil {
return err
}
}
return nil
}
func (db *DB) preflight(ctx context.Context, dbName string, dbContainer DBContainer) error {
nextDB := dbContainer.GetDB()
// Preflight stage 1: test schema correctness
schema, err := sqliteutil.QuerySchema(ctx, nextDB)
@ -442,7 +450,7 @@ func (db *DB) Preflight(ctx context.Context) (err error) {
if err != nil {
return ErrPreflight.New("database %q: failed drop test_table %w", dbName, err)
}
}
return nil
}