// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storagenodedb_test

import (
	"context"
	"fmt"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap/zaptest"

	"storj.io/common/testcontext"
	"storj.io/storj/private/dbutil/dbschema"
	"storj.io/storj/private/dbutil/sqliteutil"
	"storj.io/storj/storage/filestore"
	"storj.io/storj/storagenode/storagenodedb"
	"storj.io/storj/storagenode/storagenodedb/testdata"
)
// insertOldData will insert any OldData from the MultiDBState into the
// appropriate rawDB. This prepares the rawDB for the test that compares schema
// and data, including any changes to rows.
func insertOldData(ctx context.Context, mdbs *testdata.MultiDBState, rawDBs map[string]storagenodedb.DBContainer) error {
	for dbName, dbState := range mdbs.DBStates {
		if dbState.OldData == "" {
			continue
		}

		rawDB, ok := rawDBs[dbName]
		if !ok {
			return errs.New("Failed to find DB %s", dbName)
		}
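		// execute the OldData SQL so the upcoming migration step has existing rows to convert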
		_, err := rawDB.GetDB().ExecContext(ctx, dbState.OldData)
		if err != nil {
			return err
		}
	}
	return nil
}

// insertNewData will insert any NewData from the MultiDBState into the
// appropriate rawDB. This prepares the rawDB for the test comparing schema and
// data. It will not insert NewData if OldData is set: the migration is expected
// to convert OldData into what NewData would insert.
func insertNewData(ctx context.Context, mdbs *testdata.MultiDBState, rawDBs map[string]storagenodedb.DBContainer) error {
	for dbName, dbState := range mdbs.DBStates {
		if dbState.NewData == "" || dbState.OldData != "" {
			continue
		}

		rawDB, ok := rawDBs[dbName]
		if !ok {
			return errs.New("Failed to find DB %s", dbName)
		}
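		// execute the NewData SQL to seed rows for tables created by this migration step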
		_, err := rawDB.GetDB().ExecContext(ctx, dbState.NewData)
		if err != nil {
			return err
		}
	}
	return nil
}

// getSchemas queries the schema of each rawDB and returns a map of each rawDB's
// schema keyed by dbName.
func getSchemas(ctx context.Context, rawDBs map[string]storagenodedb.DBContainer) (map[string]*dbschema.Schema, error) {
	schemas := make(map[string]*dbschema.Schema)
	for dbName, rawDB := range rawDBs {
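		// skip databases that do not have an open connection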
		db := rawDB.GetDB()
		if db == nil {
			continue
		}

		schema, err := sqliteutil.QuerySchema(ctx, rawDB.GetDB())
		if err != nil {
			return nil, err
		}

		// we don't care about changes in the versions table
		schema.DropTable("versions")

		schemas[dbName] = schema
	}
	return schemas, nil
}

// getData queries the data of each rawDB and returns a map of each rawDB's
// data keyed by dbName.
func getData(ctx context.Context, rawDBs map[string]storagenodedb.DBContainer, schemas map[string]*dbschema.Schema) (map[string]*dbschema.Data, error) {
	data := make(map[string]*dbschema.Data)
	for dbName, rawDB := range rawDBs {
		db := rawDB.GetDB()
		if db == nil {
			continue
		}
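
		// query the table contents using the schema captured for this database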
		datum, err := sqliteutil.QueryData(ctx, rawDB.GetDB(), schemas[dbName])
		if err != nil {
			return nil, err
		}

		data[dbName] = datum
	}
	return data, nil
}

func TestMigrate(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	storageDir := ctx.Dir("storage")
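	// point the databases and piece storage at the test's temporary directory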
	cfg := storagenodedb.Config{
		Pieces:    storageDir,
		Storage:   storageDir,
		Info:      filepath.Join(storageDir, "piecestore.db"),
		Info2:     filepath.Join(storageDir, "info.db"),
		Filestore: filestore.DefaultConfig,
	}

	// create a new storagenode db connection
	db, err := storagenodedb.OpenNew(ctx, log, cfg)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()
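	// grab the raw database handles so fixtures and schema queries can target each SQLite database directly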
	rawDBs := db.RawDatabases()

	// get migration for this database
	migrations := db.Migration(ctx)
	for i, step := range migrations.Steps {
		// the schema differs before a migration step runs, so the layout can only be tested after the step is applied
		tag := fmt.Sprintf("#%d - v%d", i, step.Version)

		// find the matching expected version
		expected, ok := testdata.States.FindVersion(step.Version)
		require.True(t, ok)

		// insert old data for any tables
		err = insertOldData(ctx, expected, rawDBs)
		require.NoError(t, err, tag)

		// run migration up to a specific version
		err := migrations.TargetVersion(step.Version).Run(ctx, log.Named("migrate"))
		require.NoError(t, err, tag)

		// insert data for new tables
		err = insertNewData(ctx, expected, rawDBs)
		require.NoError(t, err, tag)

		// load schema from database
		schemas, err := getSchemas(ctx, rawDBs)
		require.NoError(t, err, tag)

		// load data from database
		data, err := getData(ctx, rawDBs, schemas)
		require.NoError(t, err, tag)

		multiDBSnapshot, err := testdata.LoadMultiDBSnapshot(ctx, expected)
		require.NoError(t, err, tag)

		// verify schema and data for each db in the expected snapshot
		for dbName, dbSnapshot := range multiDBSnapshot.DBSnapshots {
			// If the tables and indexes of the schema are empty, that's
			// semantically the same as nil. Set to nil explicitly to help with
			// comparison to snapshot.
			schema, ok := schemas[dbName]
			if ok && len(schema.Tables) == 0 {
				schema.Tables = nil
			}
			if ok && len(schema.Indexes) == 0 {
				schema.Indexes = nil
			}
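
			// the migrated schema and data must match the expected snapshot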
			require.Equal(t, dbSnapshot.Schema, schemas[dbName], tag)
			require.Equal(t, dbSnapshot.Data, data[dbName], tag)

			// verify schema for last migration step matches expected production schema
			if i == len(migrations.Steps)-1 {
				prodSchema := storagenodedb.Schema()[dbName]
				require.Equal(t, dbSnapshot.Schema, prodSchema, tag)
			}
		}
	}
}