storj/satellite/satellitedb/migrate_postgres_test.go
paul cannon b5ddfc6fa5 satellite/satellitedb: unexport satellitedb.DB
Backstory: I needed a better way to pass around information about the
underlying driver and implementation to all the various db-using things
in satellitedb (at least until some new "cockroach driver" support makes
it to DBX). After hitting a few dead ends, I decided I wanted to have a
type that could act like a *dbx.DB but which would also carry
information about the implementation, etc. Then I could pass around that
type to all the things in satellitedb that previously wanted *dbx.DB.
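
Concretely, the kind of type I mean is roughly this (a sketch only, not
the code in this change; dbutil.Implementation and the dbHandle name are
assumptions standing in for whatever actually carries the driver info):

    package satellitedb

    import (
        "storj.io/storj/private/dbutil"

        dbx "storj.io/storj/satellite/satellitedb/dbx"
    )

    // dbHandle is a hypothetical name: it behaves like *dbx.DB (the
    // embedded field promotes every dbx method onto it) while also
    // remembering which backend it talks to.
    type dbHandle struct {
        *dbx.DB

        implementation dbutil.Implementation // e.g. postgres vs. cockroach
    }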

But then I realized that *satellitedb.DB was, essentially, exactly that
already.

One thing that might have kept *satellitedb.DB from being directly
usable was that embedding a *dbx.DB inside it would make a lot of dbx
methods publicly available on a *satellitedb.DB instance that previously
were nicely encapsulated and hidden. But after a quick look, I realized
that _nothing_ outside of satellite/satellitedb even needs to use
satellitedb.DB at all. It didn't even need to be exported, except for
some trivially-replaceable code in migrate_postgres_test.go. And once
I made it unexported, any concerns about exposing new methods on it were
entirely moot.
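
(For anyone unfamiliar with the embedding concern: Go promotes an
embedded type's methods onto the outer type, so an exported wrapper
re-exposes everything the embedded type exports. A toy illustration,
nothing to do with the real dbx API:)

    package main

    import "fmt"

    type rawDB struct{}

    // Exec stands in for the many generated dbx methods.
    func (rawDB) Exec(query string) string { return "ran: " + query }

    // Exported embeds rawDB, so Exec becomes part of Exported's public API.
    type Exported struct{ rawDB }

    // unexported promotes Exec too, but code outside the package can never
    // name this type, so nothing new leaks out.
    type unexported struct{ rawDB }

    func main() {
        fmt.Println(Exported{}.Exec("SELECT 1"))
        fmt.Println(unexported{}.Exec("SELECT 1"))
    }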

So I have changed the exported *satellitedb.DB type into the unexported
*satellitedb.satelliteDB type, and changed all the places that wanted
raw *dbx.DB handles to use this new type instead.
Now they can just take a gander at the implementation member on it and
know all they need to know about the underlying database.

This will make it possible for some other pending code here to
differentiate between postgres and cockroach backends.
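
(A sketch of the kind of check that enables, continuing the dbHandle
sketch above; dbutil.Postgres and dbutil.Cockroach are assumed to be the
relevant constants in private/dbutil:)

    // Sketch only: callers inside the package can now ask the handle which
    // backend it is talking to instead of inspecting drivers or connection
    // strings.
    func (db *dbHandle) isCockroach() bool { return db.implementation == dbutil.Cockroach }
    func (db *dbHandle) isPostgres() bool  { return db.implementation == dbutil.Postgres }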

Change-Id: I27af99f8ae23b50782333da5277b553b34634edc
2019-12-16 19:09:30 +00:00

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb_test

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"testing"

	"github.com/lib/pq"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap/zaptest"

	"storj.io/storj/private/dbutil/dbschema"
	"storj.io/storj/private/dbutil/pgutil"
	"storj.io/storj/private/dbutil/pgutil/pgtest"
	"storj.io/storj/private/dbutil/tempdb"
	"storj.io/storj/private/migrate"
	"storj.io/storj/satellite/satellitedb"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
)

// loadSnapshots loads all the dbschemas from the testdata/postgres.* files.
func loadSnapshots(connstr string) (*dbschema.Snapshots, error) {
	snapshots := &dbschema.Snapshots{}

	// find all postgres sql files
	matches, err := filepath.Glob("testdata/postgres.*")
	if err != nil {
		return nil, err
	}

	for _, match := range matches {
		// extract <N> from "testdata/postgres.v<N>.sql"; sliced by index as a hack to avoid trim issues with path differences in windows/linux
		versionStr := match[19 : len(match)-4]
		version, err := strconv.Atoi(versionStr)
		if err != nil {
			return nil, errs.New("invalid testdata file %q: %v", match, err)
		}

		scriptData, err := ioutil.ReadFile(match)
		if err != nil {
			return nil, errs.New("could not read testdata file for version %d: %v", version, err)
		}

		snapshot, err := loadSnapshotFromSQL(connstr, string(scriptData))
		if err != nil {
			if pqErr, ok := err.(*pq.Error); ok && pqErr.Detail != "" {
				return nil, fmt.Errorf("Version %d error: %v\nDetail: %s\nHint: %s", version, pqErr, pqErr.Detail, pqErr.Hint)
			}
			return nil, fmt.Errorf("Version %d error: %+v", version, err)
		}
		snapshot.Version = version

		snapshots.Add(snapshot)
	}

	snapshots.Sort()

	return snapshots, nil
}

// loadSnapshotFromSQL inserts script into connstr and loads schema.
func loadSnapshotFromSQL(connstr, script string) (_ *dbschema.Snapshot, err error) {
	db, err := tempdb.OpenUnique(connstr, "load-schema")
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, db.Close()) }()

	_, err = db.Exec(script)
	if err != nil {
		return nil, err
	}

	snapshot, err := pgutil.QuerySnapshot(db)
	if err != nil {
		return nil, err
	}

	snapshot.Script = script
	return snapshot, nil
}

const newDataSeparator = `-- NEW DATA --`

// newData returns the portion of the snapshot script after the
// "-- NEW DATA --" separator, or "" when the script has no such section.
func newData(snap *dbschema.Snapshot) string {
	tokens := strings.SplitN(snap.Script, newDataSeparator, 2)
	if len(tokens) != 2 {
		return ""
	}
	return tokens[1]
}

var (
	dbxschema struct {
		sync.Once
		*dbschema.Schema
		err error
	}
)

// loadDBXSchema loads the dbx script schema only once and caches it;
// it shouldn't change during the test.
func loadDBXSchema(connstr, dbxscript string) (*dbschema.Schema, error) {
	dbxschema.Do(func() {
		dbxschema.Schema, dbxschema.err = loadSchemaFromSQL(connstr, dbxscript)
	})
	return dbxschema.Schema, dbxschema.err
}

// loadSchemaFromSQL inserts script into connstr and loads schema.
func loadSchemaFromSQL(connstr, script string) (_ *dbschema.Schema, err error) {
	db, err := tempdb.OpenUnique(connstr, "load-schema")
	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, db.Close()) }()

	_, err = db.Exec(script)
	if err != nil {
		return nil, err
	}

	return pgutil.QuerySchema(db)
}

func TestMigratePostgres(t *testing.T) {
	if *pgtest.ConnStr == "" {
		t.Skip("Postgres flag missing, example: -postgres-test-db=" + pgtest.DefaultConnStr)
	}
	pgMigrateTest(t, *pgtest.ConnStr)
}

// satelliteDB provides access to certain methods on a *satellitedb.satelliteDB
// instance, since that type is not exported.
type satelliteDB interface {
	TestDBAccess() *dbx.DB
	PostgresMigration() *migrate.Migration
}

func pgMigrateTest(t *testing.T, connStr string) {
	log := zaptest.NewLogger(t)

	snapshots, err := loadSnapshots(connStr)
	require.NoError(t, err)

	// create tempDB
	tempDB, err := tempdb.OpenUnique(connStr, "migrate")
	require.NoError(t, err)
	defer func() { require.NoError(t, tempDB.Close()) }()

	// create a new satellitedb connection
	db, err := satellitedb.New(log, tempDB.ConnStr)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	// we need raw database access unfortunately
	rawdb := db.(satelliteDB).TestDBAccess()

	var finalSchema *dbschema.Schema

	// get migration for this database
	migrations := db.(satelliteDB).PostgresMigration()
	for i, step := range migrations.Steps {
		tag := fmt.Sprintf("#%d - v%d", i, step.Version)

		// run migration up to a specific version
		err := migrations.TargetVersion(step.Version).Run(log.Named("migrate"))
		require.NoError(t, err, tag)

		// find the matching expected version
		expected, ok := snapshots.FindVersion(step.Version)
		require.True(t, ok, "Missing snapshot v%d. Did you forget to add a snapshot for the new migration?", step.Version)

		// insert data for new tables
		if newdata := newData(expected); newdata != "" {
			_, err = rawdb.Exec(newdata)
			require.NoError(t, err, tag)
		}

		// load schema from database
		currentSchema, err := pgutil.QuerySchema(rawdb)
		require.NoError(t, err, tag)

		// we don't care about changes in the versions table
		currentSchema.DropTable("versions")

		// load data from database
		currentData, err := pgutil.QueryData(rawdb, currentSchema)
		require.NoError(t, err, tag)

		// verify schema and data
		require.Equal(t, expected.Schema, currentSchema, tag)
		require.Equal(t, expected.Data, currentData, tag)

		// keep the last version around
		finalSchema = currentSchema
	}

	// verify that we also match the dbx version
	dbxschema, err := loadDBXSchema(connStr, rawdb.Schema())
	require.NoError(t, err)

	require.Equal(t, dbxschema, finalSchema, "dbx")
}