// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb_test

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/jackc/pgconn"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"golang.org/x/sync/errgroup"

	"storj.io/common/testcontext"
	"storj.io/private/dbutil/dbschema"
	"storj.io/private/dbutil/pgtest"
	"storj.io/private/dbutil/pgutil"
	"storj.io/private/dbutil/tempdb"
	"storj.io/storj/private/migrate"
	"storj.io/storj/satellite/satellitedb"
	"storj.io/storj/satellite/satellitedb/dbx"
)

// loadSnapshots loads all the dbschemas from `testdata/postgres.*`.
func loadSnapshots(ctx context.Context, connstr, dbxscript string) (*dbschema.Snapshots, *dbschema.Schema, error) {
	snapshots := &dbschema.Snapshots{}

	// find all postgres sql files
	matches, err := filepath.Glob("testdata/postgres.*")
	if err != nil {
		return nil, nil, err
	}
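
	// load every testdata snapshot and the dbx-generated schema concurrently,
	// since each one is executed against its own temporary database.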
	snapshots.List = make([]*dbschema.Snapshot, len(matches))
	var group errgroup.Group
	for i, match := range matches {
		i, match := i, match
		group.Go(func() error {
			version := parseTestdataVersion(match)
			if version < 0 {
				return errs.New("invalid testdata file %q", match)
			}

			scriptData, err := ioutil.ReadFile(match)
			if err != nil {
				return errs.New("could not read testdata file for version %d: %v", version, err)
			}

			snapshot, err := loadSnapshotFromSQL(ctx, connstr, string(scriptData))
			if err != nil {
				var pgErr *pgconn.PgError
				if errors.As(err, &pgErr) {
					return fmt.Errorf("Version %d error: %v\nDetail: %s\nHint: %s", version, pgErr, pgErr.Detail, pgErr.Hint)
				}
				return fmt.Errorf("Version %d error: %+v", version, err)
			}
			snapshot.Version = version

			snapshots.List[i] = snapshot
			return nil
		})
	}
	var dbschema *dbschema.Schema
	group.Go(func() error {
		var err error
		dbschema, err = loadSchemaFromSQL(ctx, connstr, dbxscript)
		return err
	})
	if err := group.Wait(); err != nil {
		return nil, nil, err
	}

	snapshots.Sort()

	return snapshots, dbschema, nil
}
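
// parseTestdataVersion parses the migration version from a
// testdata/postgres.v<N>.sql filename, returning -1 when the name
// does not match that pattern.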
func parseTestdataVersion(path string) int {
	path = filepath.ToSlash(strings.ToLower(path))
	path = strings.TrimPrefix(path, "testdata/postgres.v")
	path = strings.TrimSuffix(path, ".sql")

	v, err := strconv.Atoi(path)
	if err != nil {
		return -1
	}
	return v
}

// loadSnapshotFromSQL creates a unique temporary database from connstr,
// runs the given testdata script against it, and returns the resulting snapshot.
func loadSnapshotFromSQL(ctx context.Context, connstr, script string) (_ *dbschema.Snapshot, err error) {
	db, err := tempdb.OpenUnique(ctx, connstr, "load-schema")
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, db.Close()) }()
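
	// the script is split into named sections (main schema, existing data,
	// newly added data) which are executed separately below.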
	sections := dbschema.NewSections(script)

	_, err = db.ExecContext(ctx, sections.LookupSection(dbschema.Main))
	if err != nil {
		return nil, err
	}

	_, err = db.ExecContext(ctx, sections.LookupSection(dbschema.MainData))
	if err != nil {
		return nil, err
	}

	_, err = db.ExecContext(ctx, sections.LookupSection(dbschema.NewData))
	if err != nil {
		return nil, err
	}

	snapshot, err := pgutil.QuerySnapshot(ctx, db)
	if err != nil {
		return nil, err
	}

	snapshot.Sections = sections

	return snapshot, nil
}

// loadSchemaFromSQL creates a unique temporary database from connstr,
// runs the given SQL script against it, and returns the resulting schema.
func loadSchemaFromSQL(ctx context.Context, connstr, script string) (_ *dbschema.Schema, err error) {
	db, err := tempdb.OpenUnique(ctx, connstr, "load-schema")
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, db.Close()) }()

	_, err = db.ExecContext(ctx, script)
	if err != nil {
		return nil, err
	}

	return pgutil.QuerySchema(ctx, db)
}

func TestMigratePostgres(t *testing.T) { migrateTest(t, pgtest.PickPostgres(t)) }
func TestMigrateCockroach(t *testing.T) { migrateTest(t, pgtest.PickCockroachAlt(t)) }
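
// migrationTestingAccess describes the test-only hooks that a satellite
// database implementation exposes for exercising migrations directly.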
type migrationTestingAccess interface {
	// MigrationTestingDefaultDB assists in testing migrations themselves
	// against the default database.
	MigrationTestingDefaultDB() interface {
		TestDBAccess() *dbx.DB
		TestPostgresMigration() *migrate.Migration
		PostgresMigration() *migrate.Migration
	}
}
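
// migrateTest runs every migration step in order against a fresh temporary
// database, comparing the schema and data after each step with the matching
// testdata snapshot, and finally checks the end result against the dbx schema.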
func migrateTest(t *testing.T, connStr string) {
	t.Parallel()

	ctx := testcontext.NewWithTimeout(t, 8*time.Minute)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	// create tempDB
	tempDB, err := tempdb.OpenUnique(ctx, connStr, "migrate")
	require.NoError(t, err)
	defer func() { require.NoError(t, tempDB.Close()) }()

	// create a new satellitedb connection
	db, err := satellitedb.Open(ctx, log, tempDB.ConnStr, satellitedb.Options{ApplicationName: "satellite-migration-test"})
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	// we need raw database access unfortunately
	rawdb := db.(migrationTestingAccess).MigrationTestingDefaultDB().TestDBAccess()

	snapshots, dbxschema, err := loadSnapshots(ctx, connStr, rawdb.Schema())
	require.NoError(t, err)

	var finalSchema *dbschema.Schema

	// get migration for this database
	migrations := db.(migrationTestingAccess).MigrationTestingDefaultDB().PostgresMigration()
	for i, step := range migrations.Steps {
		tag := fmt.Sprintf("#%d - v%d", i, step.Version)

		// find the matching expected version
		expected, ok := snapshots.FindVersion(step.Version)
		require.True(t, ok, "Missing snapshot v%d. Did you forget to add a snapshot for the new migration?", step.Version)

		// run any queries that should happen before the migration
		if oldData := expected.LookupSection(dbschema.OldData); oldData != "" {
			_, err = rawdb.ExecContext(ctx, oldData)
			require.NoError(t, err, tag)
		}

		// run migration up to a specific version
		err := migrations.TargetVersion(step.Version).Run(ctx, log.Named("migrate"))
		require.NoError(t, err, tag)

		// insert data for new tables
		if newData := expected.LookupSection(dbschema.NewData); newData != "" {
			_, err = rawdb.ExecContext(ctx, newData)
			require.NoError(t, err, tag)
		}

		// load schema from database
		currentSchema, err := pgutil.QuerySchema(ctx, rawdb)
		require.NoError(t, err, tag)

		// we don't care about changes in the versions table
		currentSchema.DropTable("versions")

		// load data from database
		currentData, err := pgutil.QueryData(ctx, rawdb, currentSchema)
		require.NoError(t, err, tag)

		// verify schema and data
		require.Equal(t, expected.Schema, currentSchema, tag)
		require.Equal(t, expected.Data, currentData, tag)

		// keep the last version around
		finalSchema = currentSchema
	}

	// TODO(cam): remove this check with the migration step to drop the columns
	nodes, ok := finalSchema.FindTable("nodes")
	if ok {
		nodes.RemoveColumn("total_uptime_count")
		nodes.RemoveColumn("uptime_success_count")
	}

	// verify that we also match the dbx version
	require.Equal(t, dbxschema, finalSchema, "result of all migration scripts did not match dbx schema")
}

func TestMigrateGeneratedPostgres(t *testing.T) {
	migrateGeneratedTest(t, pgtest.PickPostgres(t), pgtest.PickPostgres(t))
}

func TestMigrateGeneratedCockroach(t *testing.T) {
	migrateGeneratedTest(t, pgtest.PickCockroachAlt(t), pgtest.PickCockroachAlt(t))
}

// migrateGeneratedTest verifies whether the generated code in `migratez.go` is on par with migrate.go.
func migrateGeneratedTest(t *testing.T, connStrProd, connStrTest string) {
	t.Parallel()

	ctx := testcontext.NewWithTimeout(t, 8*time.Minute)
	defer ctx.Cleanup()

	prodVersion, prodSnapshot := schemaFromMigration(t, ctx, connStrProd, func(db migrationTestingAccess) *migrate.Migration {
		return db.MigrationTestingDefaultDB().PostgresMigration()
	})

	testVersion, testSnapshot := schemaFromMigration(t, ctx, connStrTest, func(db migrationTestingAccess) *migrate.Migration {
		return db.MigrationTestingDefaultDB().TestPostgresMigration()
	})

	assert.Equal(t, prodVersion, testVersion, "migratez version does not match migration. Run `go generate` to update.")

	prodSnapshot.DropTable("versions")
	testSnapshot.DropTable("versions")

	require.Equal(t, prodSnapshot.Schema, testSnapshot.Schema, "migratez schema does not match migration. Run `go generate` to update.")
	require.Equal(t, prodSnapshot.Data, testSnapshot.Data, "migratez data does not match migration. Run `go generate` to update.")
}
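
// schemaFromMigration runs the given migration against a fresh temporary
// database and returns its final step version together with a snapshot of
// the resulting schema and data.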
func schemaFromMigration(t *testing.T, ctx *testcontext.Context, connStr string, getMigration func(migrationTestingAccess) *migrate.Migration) (version int, _ *dbschema.Snapshot) {
	log := zaptest.NewLogger(t)

	// create tempDB
	tempDB, err := tempdb.OpenUnique(ctx, connStr, "migrate")
	require.NoError(t, err)
	defer func() { require.NoError(t, tempDB.Close()) }()

	// create a new satellitedb connection
	db, err := satellitedb.Open(ctx, log, tempDB.ConnStr, satellitedb.Options{
		ApplicationName: "satellite-migration-test",
	})
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	testAccess := db.(migrationTestingAccess)

	migration := getMigration(testAccess)
	require.NoError(t, migration.Run(ctx, log))

	rawdb := testAccess.MigrationTestingDefaultDB().TestDBAccess()
	snapshot, err := pgutil.QuerySnapshot(ctx, rawdb)
	require.NoError(t, err)

	return migration.Steps[len(migration.Steps)-1].Version, snapshot
}
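
// BenchmarkSetup_Postgres and BenchmarkSetup_Cockroach measure how long it
// takes to bring a fresh database to the latest schema, using either the
// merged testing migration or the full step-by-step production migration.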
func BenchmarkSetup_Postgres(b *testing.B) {
	connstr := pgtest.PickPostgres(b)
	b.Run("merged", func(b *testing.B) {
		benchmarkSetup(b, connstr, true)
	})
	b.Run("separate", func(b *testing.B) {
		benchmarkSetup(b, connstr, false)
	})
}

func BenchmarkSetup_Cockroach(b *testing.B) {
	connstr := pgtest.PickCockroach(b)
	b.Run("merged", func(b *testing.B) {
		benchmarkSetup(b, connstr, true)
	})
	b.Run("separate", func(b *testing.B) {
		benchmarkSetup(b, connstr, false)
	})
}
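
// benchmarkSetup opens a fresh temporary database on every iteration and runs
// either the merged testing migration or the full production migration,
// depending on the merged flag.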
func benchmarkSetup(b *testing.B, connStr string, merged bool) {
	for i := 0; i < b.N; i++ {
		func() {
			ctx := context.Background()
			log := zap.NewNop()

			// create tempDB
			tempDB, err := tempdb.OpenUnique(ctx, connStr, "migrate")
			require.NoError(b, err)
			defer func() { require.NoError(b, tempDB.Close()) }()

			// create a new satellitedb connection
			db, err := satellitedb.Open(ctx, log, tempDB.ConnStr, satellitedb.Options{ApplicationName: "satellite-migration-test"})
			require.NoError(b, err)
			defer func() { require.NoError(b, db.Close()) }()

			if merged {
				err = db.TestingMigrateToLatest(ctx)
				require.NoError(b, err)
			} else {
				err = db.MigrateToLatest(ctx)
				require.NoError(b, err)
			}
		}()
	}
}