all: fix defers in loop
defer should not be called in a loop.

Change-Id: Ifa5a25a56402814b974bcdfb0c2fce56df8e7e59
commit 7183dca6cb (parent fd8e697ab2)
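Deferred calls run when the enclosing function returns, not at the end of a loop iteration, so a defer inside a loop keeps accumulating work and holds resources (such as open sql.Rows) until the whole function exits. The sketch below illustrates the problem and the two fixes applied throughout this change; the names (processAll, processAllFixed, names) are hypothetical and not taken from the diff.

package example

import (
    "context"
    "database/sql"
)

// processAll shows the anti-pattern: every deferred Close waits for
// processAll itself to return, so all result sets stay open at once.
func processAll(ctx context.Context, db *sql.DB, names []string) error {
    for _, name := range names {
        rows, err := db.QueryContext(ctx, `PRAGMA table_info(`+name+`)`)
        if err != nil {
            return err
        }
        defer rows.Close() // runs only when processAll returns

        for rows.Next() {
            // ... scan ...
        }
    }
    return nil
}

// processAllFixed wraps the body in an immediately invoked closure so the
// defer fires at the end of each iteration.
func processAllFixed(ctx context.Context, db *sql.DB, names []string) error {
    for _, name := range names {
        err := func() error {
            rows, err := db.QueryContext(ctx, `PRAGMA table_info(`+name+`)`)
            if err != nil {
                return err
            }
            defer rows.Close() // runs before the next iteration starts

            for rows.Next() {
                // ... scan ...
            }
            return rows.Err()
        }()
        if err != nil {
            return err
        }
    }
    return nil
}

The hunks below use both shapes: either the loop body moves into its own function, or it is wrapped in func() { ... }().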
@@ -73,13 +73,20 @@ func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, er

 func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, tableDefinitions []*definition) (err error) {
     for _, definition := range tableDefinitions {
+        if err := discoverTable(ctx, db, schema, definition); err != nil {
+            return err
+        }
+    }
+    return errs.Wrap(err)
+}
+
+func discoverTable(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, definition *definition) (err error) {
     table := schema.EnsureTable(definition.name)

     tableRows, err := db.QueryContext(ctx, `PRAGMA table_info(`+definition.name+`)`)
     if err != nil {
         return errs.Wrap(err)
     }
-    defer func() { err = errs.Combine(err, tableRows.Close()) }()

     for tableRows.Next() {
         var defaultValue sql.NullString
@@ -88,7 +95,7 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
         var notNull bool
         err := tableRows.Scan(&index, &name, &columnType, &notNull, &defaultValue, &pk)
         if err != nil {
-            return errs.Wrap(err)
+            return errs.Wrap(errs.Combine(tableRows.Err(), tableRows.Close(), err))
         }

         column := &dbschema.Column{
@@ -103,7 +110,10 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
             }
             table.PrimaryKey = append(table.PrimaryKey, name)
         }
+    }
+    err = errs.Combine(tableRows.Err(), tableRows.Close())
+    if err != nil {
+        return errs.Wrap(err)
     }

     matches := rxUnique.FindAllStringSubmatch(definition.sql, -1)
@@ -121,14 +131,13 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
     if err != nil {
         return errs.Wrap(err)
     }
-    defer func() { err = errs.Combine(err, keysRows.Close()) }()

     for keysRows.Next() {
         var id, sec int
         var tableName, from, to, onUpdate, onDelete, match string
         err := keysRows.Scan(&id, &sec, &tableName, &from, &to, &onUpdate, &onDelete, &match)
         if err != nil {
-            return errs.Wrap(err)
+            return errs.Wrap(errs.Combine(keysRows.Err(), keysRows.Close(), err))
         }

         column, found := table.FindColumn(from)
@@ -147,8 +156,12 @@ func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.S
             }
         }
     }
-    }
+    err = errs.Combine(keysRows.Err(), keysRows.Close())
+    if err != nil {
         return errs.Wrap(err)
+    }
+
+    return nil
 }

 func discoverIndexes(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, indexDefinitions []*definition) (err error) {
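Besides extracting discoverTable, the hunks above stop relying on defer for the error paths inside the row loops: on a Scan failure the rows are closed immediately and the Close/Err results are folded into the returned error with errs.Combine, and the success path does the same after the loop. A minimal sketch of that shape, with a hypothetical query and function name (errs is assumed to be github.com/zeebo/errs, matching the errs.Wrap/errs.Combine calls in the diff):

package example

import (
    "context"
    "database/sql"

    "github.com/zeebo/errs"
)

// scanNames reads one column from a query without any defer: both the error
// path and the success path close the rows explicitly and combine the errors.
func scanNames(ctx context.Context, db *sql.DB) (names []string, err error) {
    rows, err := db.QueryContext(ctx, `SELECT name FROM sqlite_master`)
    if err != nil {
        return nil, errs.Wrap(err)
    }

    for rows.Next() {
        var name string
        if err := rows.Scan(&name); err != nil {
            // close immediately on the error path instead of deferring
            return nil, errs.Wrap(errs.Combine(rows.Err(), rows.Close(), err))
        }
        names = append(names, name)
    }

    // success path: surface both iteration errors and Close errors
    if err := errs.Combine(rows.Err(), rows.Close()); err != nil {
        return nil, errs.Wrap(err)
    }
    return names, nil
}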
@@ -34,6 +34,7 @@ func TestBasic(t *testing.T) {

     for _, sat := range planet.Satellites {
         for _, sn := range planet.StorageNodes {
+            func() {
                 node := sn.Contact.Service.Local()
                 conn, err := sn.Dialer.DialNodeURL(ctx, sat.NodeURL())

@@ -46,6 +47,7 @@ func TestBasic(t *testing.T) {
                     Operator: &node.Operator,
                 })
                 require.NoError(t, err)
+            }()
         }
     }
     // wait a bit to see whether some failures occur
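This and the following test hunks apply the same mechanical fix: the body of each table-driven (or node-driven) loop is wrapped in func() { ... }() so that any defer inside it, such as the defer ctx.Check(client.Close) calls visible in later hunks, runs before the next iteration instead of piling up until the test returns. A small sketch of that shape; open and use are hypothetical stand-ins for the real dial and RPC calls:

package example

import "testing"

// closer is a stand-in for whatever resource each iteration opens.
type closer interface{ Close() error }

// perIteration shows the wrapper: the deferred Close now runs at the end of
// each loop pass rather than at the end of the test function.
func perIteration(t *testing.T, addrs []string, open func(string) (closer, error), use func(*testing.T, closer)) {
    for _, addr := range addrs {
        func() {
            conn, err := open(addr)
            if err != nil {
                t.Fatal(err)
            }
            defer func() { _ = conn.Close() }()

            use(t, conn)
        }()
    }
}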
@@ -39,7 +39,6 @@ func TestAuth_Register(t *testing.T) {
             },
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-
         for i, test := range []struct {
             Partner      string
             ValidPartner bool
@@ -50,6 +49,7 @@ func TestAuth_Register(t *testing.T) {
             {Partner: "Raiden nEtwork", ValidPartner: true},
             {Partner: "invalid-name", ValidPartner: false},
         } {
+            func() {
                 registerData := struct {
                     FullName  string `json:"fullName"`
                     ShortName string `json:"shortName"`
@@ -96,6 +96,7 @@ func TestAuth_Register(t *testing.T) {
                 } else {
                     require.Equal(t, uuid.UUID{}, user.PartnerID)
                 }
+            }()
         }
     })
 }
@@ -215,6 +215,7 @@ func TestInvalidAPIKey(t *testing.T) {
         require.NoError(t, err)

         for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
+            func() {
                 client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
                 require.NoError(t, err)
                 defer ctx.Check(client.Close)
@@ -294,6 +295,7 @@ func TestInvalidAPIKey(t *testing.T) {

                 err = client.CommitSegment(ctx, metainfo.CommitSegmentParams{SegmentID: segmentID})
                 assertInvalidArgument(t, err, false)
+            }()
         }
     })
 }
@@ -76,6 +76,7 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
         }

         for _, tt := range testCases {
+            func() {
                 // create serial number to use in test. must be unique for each run.
                 serialNumber1 := testrand.SerialNumber()
                 err = ordersDB.CreateSerialInfo(ctx, serialNumber1, []byte(bucketID), now.AddDate(1, 0, 10))
@@ -176,6 +177,7 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
                 newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, tt.orderCreation)
                 require.NoError(t, err)
                 require.EqualValues(t, tt.settledAmt, newBbw)
+            }()
         }
     })
 }
@@ -223,6 +225,7 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
         }

         for _, tt := range testCases {
+            func() {
                 // create signed orderlimit or order to test with
                 limit := &pb.OrderLimit{
                     SerialNumber: serialNumber,
@@ -289,6 +292,7 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
                 newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
                 require.NoError(t, err)
                 require.Equal(t, dataAmount, newBbw)
+            }()
         }
     })
 }
@@ -358,6 +358,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI

     var bucketUsageRollups []accounting.BucketUsageRollup
     for _, bucket := range buckets {
+        err := func() error {
             bucketRollup := accounting.BucketUsageRollup{
                 ProjectID:  projectID,
                 BucketName: []byte(bucket),
@@ -368,7 +369,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
             // get bucket_bandwidth_rollups
             rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], []byte(bucket), since, before)
             if err != nil {
-                return nil, err
+                return err
             }
             defer func() { err = errs.Combine(err, rollupsRows.Close()) }()

@@ -379,7 +380,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI

                 err = rollupsRows.Scan(&settled, &inline, &action)
                 if err != nil {
-                    return nil, err
+                    return err
                 }

                 switch action {
@@ -394,7 +395,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
                 }
             }
             if err := rollupsRows.Err(); err != nil {
-                return nil, err
+                return err
             }

             bucketStorageTallies, err := storageQuery(ctx,
@@ -404,7 +405,7 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
                 dbx.BucketStorageTally_IntervalStart(before))

             if err != nil {
-                return nil, err
+                return err
             }

             // fill metadata, objects and stored data
@@ -424,6 +425,11 @@ func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectI
             }

             bucketUsageRollups = append(bucketUsageRollups, bucketRollup)
+            return nil
+        }()
+        if err != nil {
+            return nil, err
+        }
     }

     return bucketUsageRollups, nil
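GetBucketUsageRollups keeps its deferred Close of rollupsRows, but the whole per-bucket body now runs inside err := func() error { ... }(), so the defer fires once per bucket and the inner returns shrink from return nil, err to return err. A reduced sketch of that structure with made-up table and column names (errs is again assumed to be github.com/zeebo/errs):

package example

import (
    "context"
    "database/sql"

    "github.com/zeebo/errs"
)

// sumBuckets runs each iteration in a func() error so the deferred Close is
// released per bucket and still folded into that iteration's error.
func sumBuckets(ctx context.Context, db *sql.DB, buckets []string) (int64, error) {
    var total int64
    for _, bucket := range buckets {
        err := func() (err error) {
            rows, err := db.QueryContext(ctx, `SELECT amount FROM usage WHERE bucket = ?`, bucket)
            if err != nil {
                return err
            }
            // runs at the end of this iteration; the named return lets the
            // Close error reach the caller
            defer func() { err = errs.Combine(err, rows.Close()) }()

            for rows.Next() {
                var amount int64
                if err := rows.Scan(&amount); err != nil {
                    return err
                }
                total += amount
            }
            return rows.Err()
        }()
        if err != nil {
            return 0, err
        }
    }
    return total, nil
}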
@@ -163,6 +163,7 @@ func testConstraints(t *testing.T, ctx *testcontext.Context, store storage.KeyVa
         {storage.Value("old-value"), nil},
         {storage.Value("old-value"), storage.Value("new-value")},
     } {
+        func() {
             errTag := fmt.Sprintf("%d. %+v", i, tt)
             key := storage.Key("test-key")
             val := storage.Value("test-value")
@@ -173,6 +174,7 @@ func testConstraints(t *testing.T, ctx *testcontext.Context, store storage.KeyVa

             err = store.CompareAndSwap(ctx, key, tt.old, tt.new)
             assert.True(t, storage.ErrValueChanged.Has(err), "%s: unexpected error: %+v", errTag, err)
+        }()
     }
 })

@@ -398,7 +398,6 @@ func (service *Service) sendOrdersFromFileStore(ctx context.Context, now time.Ti
     var group errgroup.Group
     attemptedSatellites := 0
     ctx, cancel := context.WithTimeout(ctx, service.config.SenderTimeout)
-    defer cancel()

     for satelliteID, unsentInfo := range ordersBySatellite {
         satelliteID, unsentInfo := satelliteID, unsentInfo
@@ -430,6 +429,7 @@ func (service *Service) sendOrdersFromFileStore(ctx context.Context, now time.Ti

     }
     _ = group.Wait() // doesn't return errors
+    cancel()

     // if all satellites that orders need to be sent to are offline, exit and try again later.
     if attemptedSatellites == 0 {
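In sendOrdersFromFileStore the timeout context was previously released with defer cancel(); the change calls cancel() explicitly once group.Wait() has returned, so the context is cleaned up as soon as the sends finish rather than whenever the surrounding code eventually exits. A compact sketch of that ordering, with a hypothetical sendTo helper and targets slice, assuming the errgroup in the hunk is golang.org/x/sync/errgroup:

package example

import (
    "context"
    "log"
    "time"

    "golang.org/x/sync/errgroup"
)

// sendAll dials every target under one timeout and releases the timeout
// context explicitly right after the group finishes.
func sendAll(ctx context.Context, timeout time.Duration, targets []string, sendTo func(context.Context, string) error) {
    ctx, cancel := context.WithTimeout(ctx, timeout)

    var group errgroup.Group
    for _, target := range targets {
        target := target
        group.Go(func() error {
            if err := sendTo(ctx, target); err != nil {
                log.Printf("send to %s failed: %v", target, err)
            }
            return nil
        })
    }
    _ = group.Wait() // doesn't return errors

    // release the timeout context now instead of deferring to function exit
    cancel()
}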
@@ -50,6 +50,7 @@ func TestUploadAndPartialDownload(t *testing.T) {
         {1513, 1584},
         {13581, 4783},
     } {
+        func() {
             if piecestore.DefaultConfig.InitialStep < tt.size {
                 t.Fatal("test expects initial step to be larger than size to download")
             }
@@ -67,6 +68,7 @@ func TestUploadAndPartialDownload(t *testing.T) {
             assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data)

             require.NoError(t, download.Close())
+        }()
     }

     var totalBandwidthUsage bandwidth.Usage
@@ -226,6 +226,7 @@ func TestOrderLimitGetValidation(t *testing.T) {
             err: "expected get or get repair or audit action got PUT",
         },
     } {
+        func() {
             client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
             require.NoError(t, err)
             defer ctx.Check(client.Close)
@@ -265,6 +266,7 @@ func TestOrderLimitGetValidation(t *testing.T) {
             } else {
                 require.NoError(t, err)
             }
+        }()
     }
 })
 }
@@ -349,6 +349,14 @@ func (db *DB) MigrateToLatest(ctx context.Context) error {
 // Preflight conducts a pre-flight check to ensure correct schemas and minimal read+write functionality of the database tables.
 func (db *DB) Preflight(ctx context.Context) (err error) {
     for dbName, dbContainer := range db.SQLDBs {
+        if err := db.preflight(ctx, dbName, dbContainer); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (db *DB) preflight(ctx context.Context, dbName string, dbContainer DBContainer) error {
     nextDB := dbContainer.GetDB()
     // Preflight stage 1: test schema correctness
     schema, err := sqliteutil.QuerySchema(ctx, nextDB)
@@ -442,7 +450,7 @@ func (db *DB) Preflight(ctx context.Context) (err error) {
     if err != nil {
         return ErrPreflight.New("database %q: failed drop test_table %w", dbName, err)
     }
-    }
+
     return nil
 }
