satellite/metainfo/metabase: basic migrate

Change-Id: Ia34f38e763af9ba88f75038b8ecfc9eb6550eae0
Egon Elbre 2020-11-05 15:30:39 +02:00
parent 56e6bc884c
commit 0a77deec81
5 changed files with 163 additions and 81 deletions


@@ -64,7 +64,7 @@ pipeline {
'cockroach://root@localhost:26259/testcockroach?sslmode=disable'
STORJ_TEST_COCKROACH_ALT = 'cockroach://root@localhost:26260/testcockroach?sslmode=disable'
STORJ_TEST_POSTGRES = 'postgres://postgres@localhost/teststorj?sslmode=disable'
STORJ_TEST_DATABASES = 'crdb|pgx|postgres://root@localhost:26259/testmetabase?sslmode=disable;pg|pgx|postgres://postgres@localhost/testmetabase?sslmode=disable'
STORJ_TEST_DATABASES = 'crdb|pgx|cockroach://root@localhost:26259/testmetabase?sslmode=disable;pg|pgx|postgres://postgres@localhost/testmetabase?sslmode=disable'
COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/versioncontrol/...'}"
}
steps {
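
The STORJ_TEST_DATABASES value is a semicolon-separated list of name|driver|connstr triplets; the change above only switches the CockroachDB entry from a postgres:// URL to a cockroach:// URL so it is routed to the cockroach driver. A minimal sketch of parsing that format (the helper below is illustrative, not part of this commit):

package main

import (
	"fmt"
	"os"
	"strings"
)

// testDatabase mirrors one name|driver|connstr triplet from STORJ_TEST_DATABASES.
type testDatabase struct {
	Name    string
	Driver  string
	ConnStr string
}

// parseTestDatabases splits "crdb|pgx|cockroach://...;pg|pgx|postgres://..."
// into individual entries, skipping malformed ones.
func parseTestDatabases(value string) []testDatabase {
	var dbs []testDatabase
	for _, entry := range strings.Split(value, ";") {
		parts := strings.SplitN(entry, "|", 3)
		if len(parts) != 3 {
			continue
		}
		dbs = append(dbs, testDatabase{Name: parts[0], Driver: parts[1], ConnStr: parts[2]})
	}
	return dbs
}

func main() {
	for _, db := range parseTestDatabases(os.Getenv("STORJ_TEST_DATABASES")) {
		fmt.Println(db.Name, db.Driver, db.ConnStr)
	}
}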


@@ -437,6 +437,18 @@ func cmdMigrationRun(cmd *cobra.Command, args []string) (err error) {
return errs.New("Error creating tables for pointer database on satellite: %+v", err)
}
metabaseDB, err := metainfo.OpenMetabase(ctx, log.Named("metabase"), runCfg.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating metabase connection: %+v", err)
}
defer func() {
err = errs.Combine(err, metabaseDB.Close())
}()
err = metabaseDB.MigrateToLatest(ctx)
if err != nil {
return errs.New("Error creating metabase tables: %+v", err)
}
return nil
}
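
The deferred Close depends on the named err return: errs.Combine merges the Close error into whatever the migration returned, so a failed close still surfaces even when the migration succeeded. A standalone sketch of that pattern, with illustrative names:

package main

import (
	"context"
	"io"

	"github.com/zeebo/errs"
)

// migrateAndClose shows the close-and-combine pattern used above: because err
// is a named return, the deferred assignment can fold the Close error into it.
func migrateAndClose(ctx context.Context, db io.Closer, migrate func(context.Context) error) (err error) {
	defer func() {
		err = errs.Combine(err, db.Close())
	}()
	return migrate(ctx)
}

func main() {
	_ = migrateAndClose // illustrative only; wiring up a real database is out of scope here
}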


@@ -112,10 +112,14 @@ type MetabaseDB interface {
io.Closer
// MigrateToLatest migrates to latest schema version.
MigrateToLatest(ctx context.Context) error
// InternalImplementation returns *metabase.DB.
// TODO: remove.
InternalImplementation() interface{}
}
// OpenMetabase returns database for storing objects and segments.
func OpenMetabase(ctx context.Context, logger *zap.Logger, dbURLString string) (db MetabaseDB, err error) {
func OpenMetabase(ctx context.Context, log *zap.Logger, dbURLString string) (db MetabaseDB, err error) {
_, source, implementation, err := dbutil.SplitConnStr(dbURLString)
if err != nil {
return nil, err
@@ -123,9 +127,9 @@ func OpenMetabase(ctx context.Context, logger *zap.Logger, dbURLString string) (
switch implementation {
case dbutil.Postgres:
db, err = metabase.Open(ctx, "pgx", dbURLString)
db, err = metabase.Open(ctx, log, "pgx", dbURLString)
case dbutil.Cockroach:
db, err = metabase.Open(ctx, "cockroach", dbURLString)
db, err = metabase.Open(ctx, log, "cockroach", dbURLString)
default:
err = Error.New("unsupported db implementation: %s", dbURLString)
}
@@ -134,6 +138,6 @@ func OpenMetabase(ctx context.Context, logger *zap.Logger, dbURLString string) (
return nil, err
}
logger.Debug("Connected to:", zap.String("db source", source))
log.Debug("Connected to:", zap.String("db source", source))
return db, nil
}
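
A minimal sketch of how a caller uses the new signature: the logger is threaded through to metabase.Open, and InternalImplementation is the temporary escape hatch back to the concrete *metabase.DB. The program below is illustrative; the connection string reuses the testmetabase URL from the Jenkinsfile change above:

package main

import (
	"context"
	"log"

	"go.uber.org/zap"

	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/metainfo/metabase"
)

func main() {
	ctx := context.Background()
	logger := zap.NewExample()

	db, err := metainfo.OpenMetabase(ctx, logger.Named("metabase"), "postgres://postgres@localhost/testmetabase?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()

	if err := db.MigrateToLatest(ctx); err != nil {
		log.Fatal(err)
	}

	// Temporary escape hatch while callers still need the concrete type.
	_ = db.InternalImplementation().(*metabase.DB)
}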


@@ -6,11 +6,15 @@ package metabase
import (
"context"
"strconv"
_ "github.com/jackc/pgx/v4" // registers pgx as a tagsql driver.
_ "github.com/jackc/pgx/v4/stdlib" // registers pgx as a tagsql driver.
"github.com/spacemonkeygo/monkit/v3"
"go.uber.org/zap"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
)
@@ -20,19 +24,25 @@ var (
// DB implements a database for storing objects and segments.
type DB struct {
db tagsql.DB
log *zap.Logger
db tagsql.DB
}
// Open opens a connection to metabase.
func Open(ctx context.Context, driverName, connstr string) (*DB, error) {
func Open(ctx context.Context, log *zap.Logger, driverName, connstr string) (*DB, error) {
db, err := tagsql.Open(ctx, driverName, connstr)
if err != nil {
return nil, Error.Wrap(err)
}
dbutil.Configure(ctx, db, "metabase", mon)
return &DB{db: db}, nil
return &DB{log: log, db: postgresRebind{db}}, nil
}
// InternalImplementation returns *metabase.DB
// TODO: remove.
func (db *DB) InternalImplementation() interface{} { return db }
// Ping checks whether connection has been established.
func (db *DB) Ping(ctx context.Context) error {
return Error.Wrap(db.db.PingContext(ctx))
@@ -58,65 +68,123 @@ func (db *DB) DestroyTables(ctx context.Context) error {
//
// TODO: use migrate package.
func (db *DB) MigrateToLatest(ctx context.Context) error {
var err error
// TODO: verify whether this is all we need.
_, err = db.db.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS objects (
project_id BYTEA NOT NULL,
bucket_name BYTEA NOT NULL, -- we're using bucket_name here to avoid a lookup into buckets table
object_key BYTEA NOT NULL, -- using 'object_key' instead of 'key' to avoid reserved word
version INT4 NOT NULL,
stream_id BYTEA NOT NULL,
created_at TIMESTAMPTZ NOT NULL default now(),
expires_at TIMESTAMPTZ,
status INT2 NOT NULL default 0,
segment_count INT4 NOT NULL default 0,
encrypted_metadata_nonce BYTEA default NULL,
encrypted_metadata BYTEA default NULL,
total_encrypted_size INT4 NOT NULL default 0,
fixed_segment_size INT4 NOT NULL default 0,
encryption INT8 NOT NULL default 0,
zombie_deletion_deadline TIMESTAMPTZ default now() + '1 day', -- should this be in a separate table?
PRIMARY KEY (project_id, bucket_name, object_key, version)
);
`)
if err != nil {
return Error.New("failed to create objects table: %w", err)
}
// TODO: verify whether this is all we need.
_, err = db.db.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS segments (
stream_id BYTEA NOT NULL,
position INT8 NOT NULL,
root_piece_id BYTEA NOT NULL,
encrypted_key_nonce BYTEA NOT NULL,
encrypted_key BYTEA NOT NULL,
encrypted_size INT4 NOT NULL, -- maybe this can be skipped?
plain_offset INT8 NOT NULL, -- this is needed to find segment based on plain byte offset
plain_size INT4 NOT NULL,
redundancy INT8 NOT NULL default 0,
inline_data BYTEA DEFAULT NULL,
remote_pieces BYTEA[],
PRIMARY KEY (stream_id, position) -- TODO: should this use plain_offset for the primary index?
)
`)
if err != nil {
return Error.New("failed to create segments table: %w", err)
}
return nil
migration := db.PostgresMigration()
return migration.Run(ctx, db.log.Named("migrate"))
}
// PostgresMigration returns steps needed for migrating postgres database.
func (db *DB) PostgresMigration() *migrate.Migration {
// TODO: merge this with satellite migration code or a way to keep them in sync.
return &migrate.Migration{
Table: "metabase_versions",
Steps: []*migrate.Step{
{
DB: &db.db,
Description: "initial setup",
Version: 1,
Action: migrate.SQL{
`CREATE TABLE objects (
project_id BYTEA NOT NULL,
bucket_name BYTEA NOT NULL, -- we're using bucket_name here to avoid a lookup into buckets table
object_key BYTEA NOT NULL, -- using 'object_key' instead of 'key' to avoid reserved word
version INT4 NOT NULL,
stream_id BYTEA NOT NULL,
created_at TIMESTAMPTZ NOT NULL default now(),
expires_at TIMESTAMPTZ,
status INT2 NOT NULL default 0,
segment_count INT4 NOT NULL default 0,
encrypted_metadata_nonce BYTEA default NULL,
encrypted_metadata BYTEA default NULL,
total_encrypted_size INT4 NOT NULL default 0,
fixed_segment_size INT4 NOT NULL default 0,
encryption INT8 NOT NULL default 0,
zombie_deletion_deadline TIMESTAMPTZ default now() + '1 day', -- should this be in a separate table?
PRIMARY KEY (project_id, bucket_name, object_key, version)
)`,
`CREATE TABLE segments (
stream_id BYTEA NOT NULL,
position INT8 NOT NULL,
root_piece_id BYTEA NOT NULL,
encrypted_key_nonce BYTEA NOT NULL,
encrypted_key BYTEA NOT NULL,
encrypted_size INT4 NOT NULL, -- maybe this can be skipped?
plain_offset INT8 NOT NULL, -- this is needed to find segment based on plain byte offset
plain_size INT4 NOT NULL,
redundancy INT8 NOT NULL default 0,
inline_data BYTEA DEFAULT NULL,
remote_pieces BYTEA[],
PRIMARY KEY (stream_id, position) -- TODO: should this use plain_offset for the primary index?
)`,
},
},
},
}
}
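
Later schema changes are meant to be appended as additional steps with higher versions rather than edits to the initial step. A hedged sketch of what such a follow-up could look like, in the same package; the description, version number, and SQL below are hypothetical:

package metabase

import "storj.io/storj/private/migrate"

// postgresMigrationV2Step is a hypothetical example of a follow-up step that
// would be appended to the Steps slice in PostgresMigration above.
func (db *DB) postgresMigrationV2Step() *migrate.Step {
	return &migrate.Step{
		DB:          &db.db,
		Description: "add example column to segments", // hypothetical
		Version:     2,
		Action: migrate.SQL{
			`ALTER TABLE segments ADD COLUMN example_column BYTEA`, // hypothetical
		},
	}
}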
// This is needed for migrate to work.
// TODO: clean this up.
type postgresRebind struct{ tagsql.DB }
func (pq postgresRebind) Rebind(sql string) string {
type sqlParseState int
const (
sqlParseStart sqlParseState = iota
sqlParseInStringLiteral
sqlParseInQuotedIdentifier
sqlParseInComment
)
out := make([]byte, 0, len(sql)+10)
j := 1
state := sqlParseStart
for i := 0; i < len(sql); i++ {
ch := sql[i]
switch state {
case sqlParseStart:
switch ch {
case '?':
out = append(out, '$')
out = append(out, strconv.Itoa(j)...)
state = sqlParseStart
j++
continue
case '-':
if i+1 < len(sql) && sql[i+1] == '-' {
state = sqlParseInComment
}
case '"':
state = sqlParseInQuotedIdentifier
case '\'':
state = sqlParseInStringLiteral
}
case sqlParseInStringLiteral:
if ch == '\'' {
state = sqlParseStart
}
case sqlParseInQuotedIdentifier:
if ch == '"' {
state = sqlParseStart
}
case sqlParseInComment:
if ch == '\n' {
state = sqlParseStart
}
}
out = append(out, ch)
}
return string(out)
}
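
Rebind rewrites ? placeholders into PostgreSQL-style $1, $2, ... while leaving string literals, quoted identifiers, and -- comments untouched. A minimal sketch of the expected behavior as a test in the same package (the test itself is illustrative, not part of this commit):

package metabase

import "testing"

func TestRebindSketch(t *testing.T) {
	rb := postgresRebind{} // the embedded tagsql.DB is never touched by Rebind

	got := rb.Rebind(`SELECT * FROM objects WHERE project_id = ? AND bucket_name = ?`)
	want := `SELECT * FROM objects WHERE project_id = $1 AND bucket_name = $2`
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}

	// Question marks inside string literals and -- comments are left alone.
	got = rb.Rebind("SELECT '?' -- keep this marker?\nFROM objects WHERE version = ?")
	want = "SELECT '?' -- keep this marker?\nFROM objects WHERE version = $1"
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}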


@@ -10,9 +10,12 @@ import (
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/common/testcontext"
_ "storj.io/storj/private/dbutil/cockroachutil" // register cockroach driver
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
var databases = flag.String("databases", os.Getenv("STORJ_TEST_DATABASES"), "databases to use for testing")
@@ -26,7 +29,7 @@ func All(t *testing.T, fn func(ctx *testcontext.Context, t *testing.T, db *metab
infos := []dbinfo{
{"pg", "pgx", "postgres://storj:storj-pass@localhost/metabase?sslmode=disable"},
{"crdb", "pgx", "postgres://root@localhost:26257/metabase?sslmode=disable"},
{"crdb", "pgx", "cockroach://root@localhost:26257/metabase?sslmode=disable"},
}
if *databases != "" {
infos = nil
@@ -44,7 +47,11 @@ func All(t *testing.T, fn func(ctx *testcontext.Context, t *testing.T, db *metab
ctx := testcontext.New(t)
defer ctx.Cleanup()
db, err := metabase.Open(ctx, info.driver, info.connstr)
db, err := satellitedbtest.CreateMetabaseDB(ctx, zaptest.NewLogger(t), t.Name(), "M", 0, satellitedbtest.Database{
Name: info.name,
URL: info.connstr,
Message: "",
})
if err != nil {
t.Fatal(err)
}
@@ -54,20 +61,11 @@ func All(t *testing.T, fn func(ctx *testcontext.Context, t *testing.T, db *metab
}
}()
// TODO: use schemas instead
if err := db.DestroyTables(ctx); err != nil {
t.Fatal(err)
}
if err := db.MigrateToLatest(ctx); err != nil {
t.Fatal(err)
}
defer func() {
if err := db.DestroyTables(ctx); err != nil {
t.Fatal(err)
}
}()
fn(ctx, t, db)
fn(ctx, t, db.InternalImplementation().(*metabase.DB))
})
}
}
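
Callers of All keep the same shape after this change: the callback still receives a *metabase.DB, but the database is now created through satellitedbtest.CreateMetabaseDB and arrives already migrated, so individual tests no longer call DestroyTables and MigrateToLatest themselves. A minimal sketch of a caller (the test body is illustrative; the package name is assumed, since the package clause is not shown in this diff):

package metabasetest // assumed name; not shown in this diff

import (
	"testing"

	"storj.io/common/testcontext"

	"storj.io/storj/satellite/metainfo/metabase"
)

// TestPingSketch is illustrative only; it shows the callback shape All expects.
func TestPingSketch(t *testing.T) {
	All(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
		// db comes from satellitedbtest.CreateMetabaseDB and is already
		// migrated to the latest schema by the helper above.
		if err := db.Ping(ctx); err != nil {
			t.Fatal(err)
		}
	})
}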