2019-02-14 21:55:21 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package satellitedb_test
|
|
|
|
|
|
|
|
import (
|
2020-01-13 13:18:48 +00:00
|
|
|
"context"
|
2020-06-28 04:56:29 +01:00
|
|
|
"errors"
|
2019-02-14 21:55:21 +00:00
|
|
|
"fmt"
|
2022-10-11 12:39:08 +01:00
|
|
|
"os"
|
2019-02-14 21:55:21 +00:00
|
|
|
"path/filepath"
|
2021-10-11 15:08:26 +01:00
|
|
|
"sort"
|
2019-02-14 21:55:21 +00:00
|
|
|
"strconv"
|
2021-02-23 08:22:19 +00:00
|
|
|
"strings"
|
2019-02-14 21:55:21 +00:00
|
|
|
"testing"
|
2020-01-16 14:27:24 +00:00
|
|
|
"time"
|
2019-02-14 21:55:21 +00:00
|
|
|
|
2020-06-28 04:56:29 +01:00
|
|
|
"github.com/jackc/pgconn"
|
2021-02-22 16:55:06 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
2019-02-14 21:55:21 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2019-12-04 03:36:21 +00:00
|
|
|
"github.com/zeebo/errs"
|
2020-01-30 17:23:07 +00:00
|
|
|
"go.uber.org/zap"
|
2019-02-14 21:55:21 +00:00
|
|
|
"go.uber.org/zap/zaptest"
|
2019-12-20 19:03:58 +00:00
|
|
|
"golang.org/x/sync/errgroup"
|
2020-01-16 14:27:24 +00:00
|
|
|
|
2021-10-11 15:08:26 +01:00
|
|
|
"storj.io/common/sync2"
|
2020-01-13 13:18:48 +00:00
|
|
|
"storj.io/common/testcontext"
|
2021-04-23 10:52:40 +01:00
|
|
|
"storj.io/private/dbutil/dbschema"
|
|
|
|
"storj.io/private/dbutil/pgtest"
|
|
|
|
"storj.io/private/dbutil/pgutil"
|
|
|
|
"storj.io/private/dbutil/tempdb"
|
2020-11-30 11:18:45 +00:00
|
|
|
"storj.io/storj/private/migrate"
|
2023-02-06 12:15:36 +00:00
|
|
|
"storj.io/storj/satellite"
|
2019-02-14 21:55:21 +00:00
|
|
|
"storj.io/storj/satellite/satellitedb"
|
|
|
|
)
|
|
|
|
|
2022-08-04 14:22:22 +01:00
|
|
|
// maxMigrationsToTest limits how many of the newest testdata snapshots are
// verified per run, because creating a fresh database per snapshot is slow.
const maxMigrationsToTest = 10
|
|
|
|
|
2020-07-16 16:27:24 +01:00
|
|
|
// loadSnapshots loads all the dbschemas from `testdata/postgres.*`.
|
2022-08-04 14:22:22 +01:00
|
|
|
func loadSnapshots(ctx context.Context, connstr, dbxscript string, maxSnapshots int) (*dbschema.Snapshots, *dbschema.Schema, error) {
|
2019-02-14 21:55:21 +00:00
|
|
|
snapshots := &dbschema.Snapshots{}
|
|
|
|
|
|
|
|
// find all postgres sql files
|
|
|
|
matches, err := filepath.Glob("testdata/postgres.*")
|
|
|
|
if err != nil {
|
2019-12-20 19:03:58 +00:00
|
|
|
return nil, nil, err
|
2019-02-14 21:55:21 +00:00
|
|
|
}
|
2021-10-11 15:08:26 +01:00
|
|
|
sort.Strings(matches)
|
|
|
|
|
2022-08-04 14:22:22 +01:00
|
|
|
// Limit the number of snapshots we are checking
|
2021-10-11 15:08:26 +01:00
|
|
|
// because the database creation is not as fast.
|
2022-08-04 14:22:22 +01:00
|
|
|
if len(matches) > maxSnapshots {
|
|
|
|
matches = matches[len(matches)-maxSnapshots:]
|
2021-10-11 15:08:26 +01:00
|
|
|
}
|
2019-02-14 21:55:21 +00:00
|
|
|
|
2019-12-20 19:03:58 +00:00
|
|
|
snapshots.List = make([]*dbschema.Snapshot, len(matches))
|
2021-10-11 15:08:26 +01:00
|
|
|
|
|
|
|
var sem sync2.Semaphore
|
|
|
|
if strings.Contains(connstr, "cockroach") {
|
|
|
|
sem.Init(4)
|
|
|
|
} else {
|
|
|
|
sem.Init(16)
|
|
|
|
}
|
|
|
|
|
2019-12-20 19:03:58 +00:00
|
|
|
var group errgroup.Group
|
|
|
|
for i, match := range matches {
|
|
|
|
i, match := i, match
|
|
|
|
group.Go(func() error {
|
2021-10-11 15:08:26 +01:00
|
|
|
sem.Lock()
|
|
|
|
defer sem.Unlock()
|
|
|
|
|
2021-02-23 08:22:19 +00:00
|
|
|
version := parseTestdataVersion(match)
|
|
|
|
if version < 0 {
|
2019-12-20 19:03:58 +00:00
|
|
|
return errs.New("invalid testdata file %q: %v", match, err)
|
|
|
|
}
|
2019-02-14 21:55:21 +00:00
|
|
|
|
2022-10-11 12:39:08 +01:00
|
|
|
scriptData, err := os.ReadFile(match)
|
2019-12-20 19:03:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return errs.New("could not read testdata file for version %d: %v", version, err)
|
|
|
|
}
|
2019-02-14 21:55:21 +00:00
|
|
|
|
2020-01-13 13:18:48 +00:00
|
|
|
snapshot, err := loadSnapshotFromSQL(ctx, connstr, string(scriptData))
|
2019-12-20 19:03:58 +00:00
|
|
|
if err != nil {
|
2020-06-28 04:56:29 +01:00
|
|
|
var pgErr *pgconn.PgError
|
|
|
|
if errors.As(err, &pgErr) {
|
2022-03-21 14:48:03 +00:00
|
|
|
return fmt.Errorf("Version %d error: %w\nDetail: %s\nHint: %s", version, pgErr, pgErr.Detail, pgErr.Hint)
|
2019-12-20 19:03:58 +00:00
|
|
|
}
|
2021-05-14 16:05:42 +01:00
|
|
|
return fmt.Errorf("Version %d error: %w", version, err)
|
2019-12-04 03:36:21 +00:00
|
|
|
}
|
2019-12-20 19:03:58 +00:00
|
|
|
snapshot.Version = version
|
2019-02-14 21:55:21 +00:00
|
|
|
|
2019-12-20 19:03:58 +00:00
|
|
|
snapshots.List[i] = snapshot
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
var dbschema *dbschema.Schema
|
|
|
|
group.Go(func() error {
|
|
|
|
var err error
|
2020-01-13 13:18:48 +00:00
|
|
|
dbschema, err = loadSchemaFromSQL(ctx, connstr, dbxscript)
|
2019-12-20 19:03:58 +00:00
|
|
|
return err
|
|
|
|
})
|
|
|
|
if err := group.Wait(); err != nil {
|
|
|
|
return nil, nil, err
|
2019-02-14 21:55:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
snapshots.Sort()
|
|
|
|
|
2019-12-20 19:03:58 +00:00
|
|
|
return snapshots, dbschema, nil
|
2019-02-14 21:55:21 +00:00
|
|
|
}
|
|
|
|
|
2021-02-23 08:22:19 +00:00
|
|
|
// parseTestdataVersion extracts the numeric migration version from a testdata
// filename such as "testdata/postgres.v123.sql". It returns -1 when the name
// does not follow that pattern.
func parseTestdataVersion(path string) int {
	name := filepath.ToSlash(strings.ToLower(path))
	name = strings.TrimSuffix(strings.TrimPrefix(name, "testdata/postgres.v"), ".sql")

	version, err := strconv.Atoi(name)
	if err != nil {
		return -1
	}
	return version
}
|
|
|
|
|
2019-12-04 03:36:21 +00:00
|
|
|
// loadSnapshotFromSQL inserts script into connstr and loads schema.
|
2020-01-13 13:18:48 +00:00
|
|
|
func loadSnapshotFromSQL(ctx context.Context, connstr, script string) (_ *dbschema.Snapshot, err error) {
|
|
|
|
db, err := tempdb.OpenUnique(ctx, connstr, "load-schema")
|
2019-12-04 03:36:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer func() { err = errs.Combine(err, db.Close()) }()
|
|
|
|
|
2020-05-11 20:31:49 +01:00
|
|
|
sections := dbschema.NewSections(script)
|
|
|
|
|
|
|
|
_, err = db.ExecContext(ctx, sections.LookupSection(dbschema.Main))
|
2019-12-04 03:36:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-22 16:55:06 +00:00
|
|
|
_, err = db.ExecContext(ctx, sections.LookupSection(dbschema.MainData))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-05-11 20:31:49 +01:00
|
|
|
_, err = db.ExecContext(ctx, sections.LookupSection(dbschema.NewData))
|
2019-12-04 03:36:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-05-11 20:31:49 +01:00
|
|
|
snapshot, err := pgutil.QuerySnapshot(ctx, db)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-12-04 03:36:21 +00:00
|
|
|
|
2020-05-11 20:31:49 +01:00
|
|
|
snapshot.Sections = sections
|
2019-02-14 21:55:21 +00:00
|
|
|
|
2020-05-11 20:31:49 +01:00
|
|
|
return snapshot, nil
|
2019-02-14 21:55:21 +00:00
|
|
|
}
|
|
|
|
|
2019-12-04 03:36:21 +00:00
|
|
|
// loadSchemaFromSQL inserts script into connstr and loads schema.
|
2020-01-13 13:18:48 +00:00
|
|
|
func loadSchemaFromSQL(ctx context.Context, connstr, script string) (_ *dbschema.Schema, err error) {
|
|
|
|
db, err := tempdb.OpenUnique(ctx, connstr, "load-schema")
|
2019-12-04 03:36:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer func() { err = errs.Combine(err, db.Close()) }()
|
|
|
|
|
2020-01-19 14:41:23 +00:00
|
|
|
_, err = db.ExecContext(ctx, script)
|
2019-12-04 03:36:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-01-13 13:18:48 +00:00
|
|
|
return pgutil.QuerySchema(ctx, db)
|
2019-12-04 03:36:21 +00:00
|
|
|
}
|
|
|
|
|
2021-09-29 14:01:10 +01:00
|
|
|
func TestMigratePostgres(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
connstr := pgtest.PickPostgres(t)
|
|
|
|
t.Run("Versions", func(t *testing.T) { migrateTest(t, connstr) })
|
|
|
|
t.Run("Generated", func(t *testing.T) { migrateGeneratedTest(t, connstr, connstr) })
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestMigrateCockroach(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
connstr := pgtest.PickCockroachAlt(t)
|
|
|
|
t.Run("Versions", func(t *testing.T) { migrateTest(t, connstr) })
|
|
|
|
t.Run("Generated", func(t *testing.T) { migrateGeneratedTest(t, connstr, connstr) })
|
|
|
|
}
|
2020-01-30 17:23:07 +00:00
|
|
|
|
2020-01-19 14:41:23 +00:00
|
|
|
// migrateTest replays the production migration steps one by one against a
// fresh temporary database, comparing the schema and data after each step to
// the recorded testdata snapshots, and finally verifies the end state matches
// the dbx-generated schema.
func migrateTest(t *testing.T, connStr string) {
	ctx := testcontext.NewWithTimeout(t, 8*time.Minute)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	// create tempDB
	tempDB, err := tempdb.OpenUnique(ctx, connStr, "migrate")
	require.NoError(t, err)
	defer func() { require.NoError(t, tempDB.Close()) }()

	// create a new satellitedb connection
	db, err := satellitedb.Open(ctx, log, tempDB.ConnStr, satellitedb.Options{ApplicationName: "satellite-migration-test"})
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	// we need raw database access unfortunately
	rawdb := db.Testing().RawDB()

	// Snapshot loading dominates the test's runtime, so log its duration.
	loadingStart := time.Now()
	snapshots, dbxschema, err := loadSnapshots(ctx, connStr, db.Testing().Schema(), maxMigrationsToTest)
	require.NoError(t, err)
	t.Logf("snapshot loading %v", time.Since(loadingStart))

	// get migration for this database
	migrations := db.Testing().ProductionMigration()

	// find the first matching migration step for the snapshots
	firstSnapshot := snapshots.List[0]
	// stepIndex is the index of the migration step whose version matches the
	// oldest loaded snapshot, or -1 if no step matches.
	stepIndex := func() int {
		for i, step := range migrations.Steps {
			if step.Version == firstSnapshot.Version {
				return i
			}
		}
		return -1
	}()

	// migrate up to the first loaded snapshot
	err = migrations.TargetVersion(firstSnapshot.Version).Run(ctx, log.Named("initial-migration"))
	require.NoError(t, err)
	_, err = rawdb.ExecContext(ctx, firstSnapshot.LookupSection(dbschema.MainData))
	require.NoError(t, err)
	_, err = rawdb.ExecContext(ctx, firstSnapshot.LookupSection(dbschema.NewData))
	require.NoError(t, err)

	// test rest of the steps with snapshots
	var finalSchema *dbschema.Schema
	for i, step := range migrations.Steps[stepIndex+1:] {
		// tag identifies the step in assertion failure messages.
		tag := fmt.Sprintf("#%d - v%d", i, step.Version)

		// find the matching expected version
		expected, ok := snapshots.FindVersion(step.Version)
		require.True(t, ok, "Missing snapshot v%d. Did you forget to add a snapshot for the new migration?", step.Version)

		// run any queries that should happen before the migration
		if oldData := expected.LookupSection(dbschema.OldData); oldData != "" {
			_, err = rawdb.ExecContext(ctx, oldData)
			require.NoError(t, err, tag)
		}

		// run migration up to a specific version
		err := migrations.TargetVersion(step.Version).Run(ctx, log.Named("migrate"))
		require.NoError(t, err, tag)

		// insert data for new tables
		if newData := expected.LookupSection(dbschema.NewData); newData != "" {
			_, err = rawdb.ExecContext(ctx, newData)
			require.NoError(t, err, tag)
		}

		// load schema from database
		currentSchema, err := pgutil.QuerySchema(ctx, rawdb)
		require.NoError(t, err, tag)

		// we don't care changes in versions table
		currentSchema.DropTable("versions")

		// load data from database
		currentData, err := pgutil.QueryData(ctx, rawdb, currentSchema)
		require.NoError(t, err, tag)

		// verify schema and data
		require.Equal(t, expected.Schema, currentSchema, tag)
		require.Equal(t, expected.Data, currentData, tag)

		// keep the last version around
		finalSchema = currentSchema
	}

	// TODO(thepaul): remove these exceptions on adding migration to remove _gob columns
	coinpaymentsTransactions, ok := finalSchema.FindTable("coinpayments_transactions")
	if ok {
		coinpaymentsTransactions.RemoveColumn("amount_gob")
		coinpaymentsTransactions.RemoveColumn("received_gob")
	}
	conversionRates, ok := finalSchema.FindTable("stripecoinpayments_tx_conversion_rates")
	if ok {
		conversionRates.RemoveColumn("rate_gob")
	}

	// TODO(lizzy): remove this check with the migration step to drop the column last_verification_reminders.
	users, ok := finalSchema.FindTable("users")
	if ok {
		users.RemoveColumn("last_verification_reminder")
	}

	// verify that we also match the dbx version
	require.Equal(t, dbxschema, finalSchema, "result of all migration scripts did not match dbx schema")
}
|
2020-04-27 20:34:42 +01:00
|
|
|
|
2021-02-22 16:55:06 +00:00
|
|
|
// migrateGeneratedTest verifies whether the generated code in `migratez.go` is on par with migrate.go.
|
|
|
|
func migrateGeneratedTest(t *testing.T, connStrProd, connStrTest string) {
|
|
|
|
ctx := testcontext.NewWithTimeout(t, 8*time.Minute)
|
|
|
|
defer ctx.Cleanup()
|
|
|
|
|
2023-02-06 12:15:36 +00:00
|
|
|
prodVersion, prodSnapshot := schemaFromMigration(t, ctx, connStrProd, func(db satellite.DB) *migrate.Migration {
|
|
|
|
return db.Testing().ProductionMigration()
|
2021-02-22 16:55:06 +00:00
|
|
|
})
|
|
|
|
|
2023-02-06 12:15:36 +00:00
|
|
|
testVersion, testSnapshot := schemaFromMigration(t, ctx, connStrTest, func(db satellite.DB) *migrate.Migration {
|
|
|
|
return db.Testing().TestMigration()
|
2021-02-22 16:55:06 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
assert.Equal(t, prodVersion, testVersion, "migratez version does not match migration. Run `go generate` to update.")
|
|
|
|
|
|
|
|
prodSnapshot.DropTable("versions")
|
|
|
|
testSnapshot.DropTable("versions")
|
|
|
|
|
|
|
|
require.Equal(t, prodSnapshot.Schema, testSnapshot.Schema, "migratez schema does not match migration. Run `go generate` to update.")
|
|
|
|
require.Equal(t, prodSnapshot.Data, testSnapshot.Data, "migratez data does not match migration. Run `go generate` to update.")
|
|
|
|
}
|
|
|
|
|
2023-02-06 12:15:36 +00:00
|
|
|
func schemaFromMigration(t *testing.T, ctx *testcontext.Context, connStr string, getMigration func(db satellite.DB) *migrate.Migration) (version int, _ *dbschema.Snapshot) {
|
2021-02-22 16:55:06 +00:00
|
|
|
// create tempDB
|
|
|
|
log := zaptest.NewLogger(t)
|
|
|
|
|
|
|
|
tempDB, err := tempdb.OpenUnique(ctx, connStr, "migrate")
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer func() { require.NoError(t, tempDB.Close()) }()
|
|
|
|
|
|
|
|
// create a new satellitedb connection
|
|
|
|
db, err := satellitedb.Open(ctx, log, tempDB.ConnStr, satellitedb.Options{
|
|
|
|
ApplicationName: "satellite-migration-test",
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer func() { require.NoError(t, db.Close()) }()
|
|
|
|
|
2023-02-06 12:15:36 +00:00
|
|
|
migration := getMigration(db)
|
2021-02-22 16:55:06 +00:00
|
|
|
require.NoError(t, migration.Run(ctx, log))
|
|
|
|
|
2023-02-06 12:15:36 +00:00
|
|
|
snapshot, err := pgutil.QuerySnapshot(ctx, db.Testing().RawDB())
|
2021-02-22 16:55:06 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
return migration.Steps[len(migration.Steps)-1].Version, snapshot
|
|
|
|
}
|
|
|
|
|
2020-04-27 20:34:42 +01:00
|
|
|
func BenchmarkSetup_Postgres(b *testing.B) {
|
|
|
|
connstr := pgtest.PickPostgres(b)
|
|
|
|
b.Run("merged", func(b *testing.B) {
|
|
|
|
benchmarkSetup(b, connstr, true)
|
|
|
|
})
|
|
|
|
b.Run("separate", func(b *testing.B) {
|
|
|
|
benchmarkSetup(b, connstr, false)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkSetup_Cockroach(b *testing.B) {
|
|
|
|
connstr := pgtest.PickCockroach(b)
|
|
|
|
b.Run("merged", func(b *testing.B) {
|
|
|
|
benchmarkSetup(b, connstr, true)
|
|
|
|
})
|
|
|
|
b.Run("separate", func(b *testing.B) {
|
|
|
|
benchmarkSetup(b, connstr, false)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func benchmarkSetup(b *testing.B, connStr string, merged bool) {
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
func() {
|
|
|
|
ctx := context.Background()
|
|
|
|
log := zap.NewNop()
|
|
|
|
|
|
|
|
// create tempDB
|
|
|
|
tempDB, err := tempdb.OpenUnique(ctx, connStr, "migrate")
|
|
|
|
require.NoError(b, err)
|
|
|
|
defer func() { require.NoError(b, tempDB.Close()) }()
|
|
|
|
|
|
|
|
// create a new satellitedb connection
|
2020-12-04 10:24:39 +00:00
|
|
|
db, err := satellitedb.Open(ctx, log, tempDB.ConnStr, satellitedb.Options{ApplicationName: "satellite-migration-test"})
|
2020-04-27 20:34:42 +01:00
|
|
|
require.NoError(b, err)
|
|
|
|
defer func() { require.NoError(b, db.Close()) }()
|
|
|
|
|
|
|
|
if merged {
|
2023-02-06 12:15:36 +00:00
|
|
|
err = db.Testing().TestMigrateToLatest(ctx)
|
2020-04-27 20:34:42 +01:00
|
|
|
require.NoError(b, err)
|
|
|
|
} else {
|
2020-04-30 07:36:59 +01:00
|
|
|
err = db.MigrateToLatest(ctx)
|
2020-04-27 20:34:42 +01:00
|
|
|
require.NoError(b, err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
}
|