diff --git a/cmd/tools/nullify-bad-user-agents/limitedmigrations.go b/cmd/tools/nullify-bad-user-agents/limitedmigrations.go
deleted file mode 100644
index 018c3b8b9..000000000
--- a/cmd/tools/nullify-bad-user-agents/limitedmigrations.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright (C) 2021 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package main
-
-import (
-	"context"
-
-	"github.com/jackc/pgtype"
-	pgx "github.com/jackc/pgx/v4"
-	"github.com/zeebo/errs"
-	"go.uber.org/zap"
-
-	"storj.io/private/dbutil/pgutil"
-)
-
-// MigrateTablesLimited runs the migration for each table with update count limits.
-func MigrateTablesLimited(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	err = MigrateUsersLimited(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating users: %w", err)
-	}
-	err = MigrateProjectsLimited(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating projects: %w", err)
-	}
-	err = MigrateAPIKeysLimited(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating api_keys: %w", err)
-	}
-	err = MigrateBucketMetainfosLimited(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating bucket_metainfos: %w", err)
-	}
-	err = MigrateValueAttributionsLimited(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating value_attributions: %w", err)
-	}
-	return nil
-}
-
-// MigrateUsersLimited sets the user_agent column to NULL where it matches partner_id, for a limited number
-// of rows.
-func MigrateUsersLimited(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	log.Info("beginning users migration", zap.Int("max updates", config.MaxUpdates))
-
-	// wrap select in anonymous function for deferred rows.Close()
-	selected, err := func() (ids [][]byte, err error) {
-		rows, err := conn.Query(ctx, `
-			SELECT id FROM users
-			WHERE user_agent = partner_id
-			LIMIT $1
-		`, config.MaxUpdates)
-		if err != nil {
-			return nil, errs.New("selecting ids for update: %w", err)
-		}
-		defer rows.Close()
-
-		for rows.Next() {
-			var id pgtype.GenericBinary
-			err = rows.Scan(&id)
-			if err != nil {
-				return nil, errs.New("scanning row: %w", err)
-			}
-			ids = append(ids, id.Bytes)
-		}
-		return ids, rows.Err()
-	}()
-	if err != nil {
-		return err
-	}
-
-	row := conn.QueryRow(ctx, `
-		WITH updated as (
-			UPDATE users
-			SET user_agent = NULL
-			WHERE users.id IN (SELECT unnest($1::bytea[]))
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated
-	`, pgutil.ByteaArray(selected),
-	)
-	var updated int
-	err = row.Scan(&updated)
-	if err != nil {
-		return errs.New("error scanning results: %w", err)
-	}
-	log.Info("updated rows", zap.Int("count", updated))
-	return nil
-}
-
-// MigrateProjectsLimited sets the user_agent column to NULL where it matches partner_id, for a limited number
-// of rows.
-func MigrateProjectsLimited(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	log.Info("beginning projects migration", zap.Int("max updates", config.MaxUpdates))
-
-	// wrap select in anonymous function for deferred rows.Close()
-	selected, err := func() (ids [][]byte, err error) {
-		rows, err := conn.Query(ctx, `
-			SELECT id FROM projects
-			WHERE user_agent = partner_id
-			LIMIT $1
-		`, config.MaxUpdates)
-		if err != nil {
-			return nil, errs.New("selecting ids for update: %w", err)
-		}
-		defer rows.Close()
-
-		for rows.Next() {
-			var id pgtype.GenericBinary
-			err = rows.Scan(&id)
-			if err != nil {
-				return nil, errs.New("scanning row: %w", err)
-			}
-			ids = append(ids, id.Bytes)
-		}
-		return ids, rows.Err()
-	}()
-	if err != nil {
-		return err
-	}
-
-	row := conn.QueryRow(ctx, `
-		WITH updated as (
-			UPDATE projects
-			SET user_agent = NULL
-			WHERE projects.id IN (SELECT unnest($1::bytea[]))
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated
-	`, pgutil.ByteaArray(selected),
-	)
-	var updated int
-	err = row.Scan(&updated)
-	if err != nil {
-		return errs.New("error scanning results: %w", err)
-	}
-	log.Info("updated rows", zap.Int("count", updated))
-	return nil
-}
-
-// MigrateAPIKeysLimited sets the user_agent column to NULL where it matches partner_id, for a limited number
-// of rows.
-func MigrateAPIKeysLimited(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	log.Info("beginning api_keys migration", zap.Int("max updates", config.MaxUpdates))
-
-	// wrap select in anonymous function for deferred rows.Close()
-	selected, err := func() (ids [][]byte, err error) {
-		rows, err := conn.Query(ctx, `
-			SELECT id FROM api_keys
-			WHERE user_agent = partner_id
-			LIMIT $1
-		`, config.MaxUpdates)
-		if err != nil {
-			return nil, errs.New("selecting ids for update: %w", err)
-		}
-		defer rows.Close()
-
-		for rows.Next() {
-			var id pgtype.GenericBinary
-			err = rows.Scan(&id)
-			if err != nil {
-				return nil, errs.New("scanning row: %w", err)
-			}
-			ids = append(ids, id.Bytes)
-		}
-		return ids, rows.Err()
-	}()
-	if err != nil {
-		return err
-	}
-
-	row := conn.QueryRow(ctx, `
-		WITH updated as (
-			UPDATE api_keys
-			SET user_agent = NULL
-			WHERE api_keys.id IN (SELECT unnest($1::bytea[]))
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated
-	`, pgutil.ByteaArray(selected),
-	)
-	var updated int
-	err = row.Scan(&updated)
-	if err != nil {
-		return errs.New("error scanning results: %w", err)
-	}
-	log.Info("updated rows", zap.Int("count", updated))
-	return nil
-}
-
-// MigrateBucketMetainfosLimited sets the user_agent column to NULL where it matches partner_id, for a limited number
-// of rows.
-func MigrateBucketMetainfosLimited(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	log.Info("beginning bucket_metainfos migration", zap.Int("max updates", config.MaxUpdates))
-
-	// wrap select in anonymous function for deferred rows.Close()
-	selected, err := func() (ids [][]byte, err error) {
-		rows, err := conn.Query(ctx, `
-			SELECT id FROM bucket_metainfos
-			WHERE user_agent = partner_id
-			LIMIT $1
-		`, config.MaxUpdates)
-		if err != nil {
-			return nil, errs.New("selecting ids for update: %w", err)
-		}
-		defer rows.Close()
-
-		for rows.Next() {
-			var id pgtype.GenericBinary
-			err = rows.Scan(&id)
-			if err != nil {
-				return nil, errs.New("scanning row: %w", err)
-			}
-			ids = append(ids, id.Bytes)
-		}
-		return ids, rows.Err()
-	}()
-	if err != nil {
-		return err
-	}
-
-	row := conn.QueryRow(ctx, `
-		WITH updated as (
-			UPDATE bucket_metainfos
-			SET user_agent = NULL
-			WHERE bucket_metainfos.id IN (SELECT unnest($1::bytea[]))
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated
-	`, pgutil.ByteaArray(selected),
-	)
-	var updated int
-	err = row.Scan(&updated)
-	if err != nil {
-		return errs.New("error scanning results: %w", err)
-	}
-	log.Info("updated rows", zap.Int("count", updated))
-	return nil
-}
-
-// MigrateValueAttributionsLimited sets the user_agent column to NULL where it matches partner_id, for a limited number
-// of rows.
-func MigrateValueAttributionsLimited(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	log.Info("beginning value_attributions migration", zap.Int("max updates", config.MaxUpdates))
-
-	// wrap select in anonymous function for deferred rows.Close()
-	projects, buckets, err := func() (projectIDs [][]byte, buckets [][]byte, err error) {
-		rows, err := conn.Query(ctx, `
-			SELECT project_id, bucket_name FROM value_attributions
-			WHERE user_agent = partner_id
-			LIMIT $1
-		`, config.MaxUpdates)
-		if err != nil {
-			return nil, nil, errs.New("selecting rows for update: %w", err)
-		}
-		defer rows.Close()
-
-		for rows.Next() {
-			var projectID, bucketName pgtype.GenericBinary
-			err = rows.Scan(&projectID, &bucketName)
-			if err != nil {
-				return nil, nil, errs.New("scanning row: %w", err)
-			}
-			projectIDs = append(projectIDs, projectID.Bytes)
-			buckets = append(buckets, bucketName.Bytes)
-		}
-		return projectIDs, buckets, rows.Err()
-	}()
-	if err != nil {
-		return err
-	}
-
-	row := conn.QueryRow(ctx, `
-		WITH updated as (
-			UPDATE value_attributions
-			SET user_agent = NULL
-			WHERE value_attributions.project_id IN (SELECT unnest($1::bytea[]))
-			AND value_attributions.bucket_name IN (SELECT unnest($2::bytea[]))
-			AND user_agent = partner_id
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated
-	`, pgutil.ByteaArray(projects), pgutil.ByteaArray(buckets),
-	)
-	var updated int
-	err = row.Scan(&updated)
-	if err != nil {
-		return errs.New("error scanning results: %w", err)
-	}
-	log.Info("updated rows", zap.Int("count", updated))
-	return nil
-}
diff --git a/cmd/tools/nullify-bad-user-agents/main.go b/cmd/tools/nullify-bad-user-agents/main.go
deleted file mode 100644
index ad5841f7c..000000000
--- a/cmd/tools/nullify-bad-user-agents/main.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (C) 2021 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package main
-
-import (
-	"context"
-	"errors"
-
-	pgx "github.com/jackc/pgx/v4"
-	"github.com/spacemonkeygo/monkit/v3"
-	"github.com/spf13/cobra"
-	flag "github.com/spf13/pflag"
-	"github.com/zeebo/errs"
-	"go.uber.org/zap"
-
-	"storj.io/private/process"
-)
-
-var mon = monkit.Package()
-
-var (
-	rootCmd = &cobra.Command{
-		Use:   "nullify-bad-user-agents",
-		Short: "nullify-bad-user-agents",
-	}
-
-	runCmd = &cobra.Command{
-		Use:   "run",
-		Short: "run nullify-bad-user-agents",
-		RunE:  run,
-	}
-
-	config Config
-)
-
-func init() {
-	rootCmd.AddCommand(runCmd)
-
-	config.BindFlags(runCmd.Flags())
-}
-
-// Config defines configuration for migration.
-type Config struct {
-	SatelliteDB string
-	Limit       int
-	MaxUpdates  int
-}
-
-// BindFlags adds migration flags to the flagset.
-func (config *Config) BindFlags(flag *flag.FlagSet) {
-	flag.StringVar(&config.SatelliteDB, "satellitedb", "", "connection URL for satelliteDB")
-	flag.IntVar(&config.Limit, "limit", 1000, "number of updates to perform at once")
-	flag.IntVar(&config.MaxUpdates, "max-updates", 0, "max number of updates to perform on each table")
-}
-
-// VerifyFlags verifies whether the values provided are valid.
-func (config *Config) VerifyFlags() error {
-	var errlist errs.Group
-	if config.SatelliteDB == "" {
-		errlist.Add(errors.New("flag '--satellitedb' is not set"))
-	}
-	return errlist.Err()
-}
-
-func run(cmd *cobra.Command, args []string) error {
-	if err := config.VerifyFlags(); err != nil {
-		return err
-	}
-
-	ctx, _ := process.Ctx(cmd)
-	log := zap.L()
-	return Migrate(ctx, log, config)
-}
-
-func main() {
-	process.Exec(rootCmd)
-}
-
-// Migrate sets the user_agent column to NULL where it matches partner_id.
-// Affected tables:
-//
-// users
-// projects
-// api_keys
-// bucket_metainfos
-// value_attributions.
-func Migrate(ctx context.Context, log *zap.Logger, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	conn, err := pgx.Connect(ctx, config.SatelliteDB)
-	if err != nil {
-		return errs.New("unable to connect %q: %w", config.SatelliteDB, err)
-	}
-	defer func() {
-		err = errs.Combine(err, conn.Close(ctx))
-	}()
-
-	// The original migrations are already somewhat complex and in my opinion,
-	// trying to edit them to be able to handle conditionally limiting updates increased the
-	// complexity. While I think splitting out the limited update migrations isn't the
-	// most ideal solution, since this code is temporary we don't need to worry about
-	// maintenance concerns with having multiple queries.
-	if config.MaxUpdates > 1000 {
-		return errs.New("when running limited migration, set --max-updates to 1000 or less")
-	}
-	if config.MaxUpdates > 0 {
-		return MigrateTablesLimited(ctx, log, conn, config)
-	}
-
-	return MigrateTables(ctx, log, conn, config)
-}
diff --git a/cmd/tools/nullify-bad-user-agents/main_test.go b/cmd/tools/nullify-bad-user-agents/main_test.go
deleted file mode 100644
index 441560498..000000000
--- a/cmd/tools/nullify-bad-user-agents/main_test.go
+++ /dev/null
@@ -1,1188 +0,0 @@
-// Copyright (C) 2021 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package main_test
-
-import (
-	"context"
-	"strings"
-	"testing"
-
-	pgx "github.com/jackc/pgx/v4"
-	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zaptest"
-
-	"storj.io/common/macaroon"
-	"storj.io/common/storj"
-	"storj.io/common/testcontext"
-	"storj.io/common/testrand"
-	"storj.io/common/uuid"
-	"storj.io/private/dbutil"
-	"storj.io/private/dbutil/tempdb"
-	migrator "storj.io/storj/cmd/tools/nullify-bad-user-agents"
-	"storj.io/storj/satellite"
-	"storj.io/storj/satellite/attribution"
-	"storj.io/storj/satellite/console"
-	"storj.io/storj/satellite/satellitedb/satellitedbtest"
-)
-
-// Test no entries in table doesn't error.
-func TestMigrateUsersSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateUsers, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateUsersLimitedSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateUsersLimited, check, &migrator.Config{
-		MaxUpdates: 1,
-	})
-}
-
-// Test no rows to update returns no error.
-func TestMigrateUsersUpdateNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	userAgent := []byte("teststorj")
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-
-		_, err := db.Console().Users().Insert(ctx, &console.User{
-			ID:           testrand.UUID(),
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			UserAgent:    userAgent,
-		})
-		require.NoError(t, err)
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		_, users, err := db.Console().Users().GetByEmailWithUnverified(ctx, "test@storj.test")
-		require.NoError(t, err)
-		require.Len(t, users, 1)
-		require.Equal(t, userAgent, users[0].UserAgent)
-	}
-	test(t, prepare, migrator.MigrateUsers, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test select offset beyond final row.
-// With only one row, selecting with an offset of 1 will return 0 rows.
-// Test that this is accounted for and updates the row correctly.
-func TestMigrateUsersSelectOffsetBeyondRowCount(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	userID := testrand.UUID()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		_, err := db.Console().Users().Insert(ctx, &console.User{
-			ID:           userID,
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			PartnerID:    userID,
-			UserAgent:    userID.Bytes(),
-		})
-		require.NoError(t, err)
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		user, err := db.Console().Users().Get(ctx, userID)
-		require.NoError(t, err)
-		require.Nil(t, user.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateUsers, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test user_agent field is updated correctly.
-func TestMigrateUsers(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var n int
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		// insert with user_agent = partner_id
-		id := testrand.UUID()
-		_, err := db.Console().Users().Insert(ctx, &console.User{
-			ID:           id,
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			PartnerID:    id,
-			UserAgent:    id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-		// insert with user_agent = partner_id
-		id = testrand.UUID()
-		_, err = db.Console().Users().Insert(ctx, &console.User{
-			ID:           id,
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			PartnerID:    id,
-			UserAgent:    id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-		// insert an entry whose user_agent does not match partner_id
-		_, err = db.Console().Users().Insert(ctx, &console.User{
-			ID:           testrand.UUID(),
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			UserAgent:    []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		_, users, err := db.Console().Users().GetByEmailWithUnverified(ctx, "test@storj.test")
-		require.NoError(t, err)
-
-		var updated int
-		for _, u := range users {
-			if u.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, n, updated)
-		n = 0
-	}
-
-	test(t, prepare, migrator.MigrateUsers, check, &migrator.Config{
-		Limit: 1,
-	})
-}
-
-// Test limited number of user_agent fields are updated correctly.
-func TestMigrateUsersLimited(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		// insert an entry with valid user agent
-		_, err := db.Console().Users().Insert(ctx, &console.User{
-			ID:           testrand.UUID(),
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			UserAgent:    []byte("teststorj"),
-		})
-		require.NoError(t, err)
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		_, err = db.Console().Users().Insert(ctx, &console.User{
-			ID:           testrand.UUID(),
-			Email:        "test@storj.test",
-			FullName:     "Test Test",
-			PasswordHash: []byte{0, 1, 2, 3},
-			PartnerID:    id,
-			UserAgent:    id.Bytes(),
-		})
-		require.NoError(t, err)
-
-		// insert a project with user_agent matching partner_id
-		id = testrand.UUID()
-		_, err = db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			PartnerID:   id,
-			OwnerID:     testrand.UUID(),
-			UserAgent:   id.Bytes(),
-		})
-		require.NoError(t, err)
-	}
-
-	maxUpdates := 1
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		_, users, err := db.Console().Users().GetByEmailWithUnverified(ctx, "test@storj.test")
-		require.NoError(t, err)
-
-		var updated int
-		for _, u := range users {
-			if u.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, maxUpdates, updated)
-	}
-	test(t, prepare, migrator.MigrateUsersLimited, check, &migrator.Config{
-		MaxUpdates: maxUpdates,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateProjectsSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateProjects, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateProjectsLimitedSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateProjectsLimited, check, &migrator.Config{
-		MaxUpdates: 1,
-	})
-}
-
-// Test no rows to update returns no error.
-func TestMigrateProjectsUpdateNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var id uuid.UUID
-	userAgent := []byte("teststorj")
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-			UserAgent:   userAgent,
-		})
-		require.NoError(t, err)
-		id = proj.ID
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		proj, err := db.Console().Projects().Get(ctx, id)
-		require.NoError(t, err)
-		require.Equal(t, userAgent, proj.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateProjects, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test select offset beyond final row.
-// With only one row, selecting with an offset of 1 will return 0 rows.
-// Test that this is accounted for and updates the row correctly.
-func TestMigrateProjectsSelectOffsetBeyondRowCount(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var projID uuid.UUID
-	id := testrand.UUID()
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		prj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			PartnerID:   id,
-			OwnerID:     testrand.UUID(),
-			UserAgent:   id.Bytes(),
-		})
-		require.NoError(t, err)
-		projID = prj.ID
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		proj, err := db.Console().Projects().Get(ctx, projID)
-		require.NoError(t, err)
-		require.Nil(t, proj.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateProjects, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test user_agent field is updated correctly.
-func TestMigrateProjects(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var n int
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		// insert with user_agent = partner_id
-		id := testrand.UUID()
-		_, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-			PartnerID:   id,
-			UserAgent:   id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-		// insert with user_agent = partner_id
-		id = testrand.UUID()
-		_, err = db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test1",
-			Description: "test1",
-			OwnerID:     testrand.UUID(),
-			PartnerID:   id,
-			UserAgent:   id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-		// insert an entry whose user_agent does not match partner_id
-		_, err = db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			PartnerID:   testrand.UUID(),
-			OwnerID:     testrand.UUID(),
-			UserAgent:   []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		projects, err := db.Console().Projects().GetAll(ctx)
-		require.NoError(t, err)
-
-		var updated int
-		for _, prj := range projects {
-			if prj.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, n, updated)
-		n = 0
-	}
-
-	test(t, prepare, migrator.MigrateProjects, check, &migrator.Config{
-		Limit: 1,
-	})
-}
-
-// Test limited number of user_agent fields are updated correctly.
-func TestMigrateProjectsLimited(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		// insert an entry with valid user agent
-		_, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-			UserAgent:   []byte("teststorj"),
-		})
-		require.NoError(t, err)
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		_, err = db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-			UserAgent:   id.Bytes(),
-			PartnerID:   id,
-		})
-		require.NoError(t, err)
-
-		// insert another matching user_agent and partner id
-		id = testrand.UUID()
-		_, err = db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-			UserAgent:   id.Bytes(),
-			PartnerID:   id,
-		})
-		require.NoError(t, err)
-	}
-
-	maxUpdates := 1
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		projects, err := db.Console().Projects().GetAll(ctx)
-		require.NoError(t, err)
-
-		var updated int
-		for _, prj := range projects {
-			if prj.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, maxUpdates, updated)
-	}
-	test(t, prepare, migrator.MigrateProjectsLimited, check, &migrator.Config{
-		MaxUpdates: maxUpdates,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateAPIKeysSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateAPIKeys, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateAPIKeysLimitedSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateAPIKeysLimited, check, &migrator.Config{
-		MaxUpdates: 1,
-	})
-}
-
-// Test no rows to update returns no error.
-func TestMigrateAPIKeysUpdateNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var testID uuid.UUID
-	id := testrand.UUID()
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-
-		apikey, err := db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: proj.ID,
-			Name:      "test0",
-			Secret:    []byte("test"),
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-		testID = apikey.ID
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		apikey, err := db.Console().APIKeys().Get(ctx, testID)
-		require.NoError(t, err)
-		require.Nil(t, apikey.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateAPIKeys, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test select offset beyond final row.
-// With only one row, selecting with an offset of 1 will return 0 rows.
-// Test that this is accounted for and updates the row correctly.
-func TestMigrateAPIKeysSelectOffsetBeyondRowCount(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var testID uuid.UUID
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		prj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			PartnerID:   testrand.UUID(),
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-
-		id := testrand.UUID()
-		apiKey, err := db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: prj.ID,
-			PartnerID: id,
-			Name:      "test0",
-			Secret:    []byte("test"),
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-		testID = apiKey.ID
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		apiKey, err := db.Console().APIKeys().Get(ctx, testID)
-		require.NoError(t, err)
-		require.Nil(t, apiKey.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateAPIKeys, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test user_agent field is updated correctly.
-func TestMigrateAPIKeys(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var n int
-	var projID uuid.UUID
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-		projID = proj.ID
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		_, err = db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: projID,
-			Name:      "test0",
-			Secret:    []byte("test"),
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-
-		// insert another matching user_agent and partner id
-		id = testrand.UUID()
-		_, err = db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: projID,
-			Name:      "test1",
-			Secret:    []byte("test1"),
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-
-		// insert an entry whose user_agent does not match partner_id
-		_, err = db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: projID,
-			PartnerID: testrand.UUID(),
-			Name:      "test2",
-			Secret:    []byte("test"),
-			UserAgent: []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		keyPage, err := db.Console().APIKeys().GetPagedByProjectID(ctx, projID, console.APIKeyCursor{Page: 1, Limit: 1000})
-		require.NoError(t, err)
-
-		var updated int
-		for _, key := range keyPage.APIKeys {
-			if key.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, n, updated)
-		n = 0
-	}
-
-	test(t, prepare, migrator.MigrateAPIKeys, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test limited number of user_agent fields are updated correctly.
-func TestMigrateAPIKeysLimited(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var projID uuid.UUID
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-
-		projID = proj.ID
-
-		// insert an entry with valid user agent
-		_, err = db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: projID,
-			Name:      "test0",
-			Secret:    []byte("test"),
-			UserAgent: []byte("teststorj"),
-		})
-		require.NoError(t, err)
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		_, err = db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: projID,
-			Name:      "test1",
-			Secret:    []byte("test"),
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-
-		// insert another matching user_agent and partner id
-		id = testrand.UUID()
-		_, err = db.Console().APIKeys().Create(ctx, testrand.UUID().Bytes(), console.APIKeyInfo{
-			ProjectID: projID,
-			PartnerID: id,
-			Name:      "test2",
-			Secret:    []byte("test"),
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-	}
-
-	maxUpdates := 1
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		keyPage, err := db.Console().APIKeys().GetPagedByProjectID(ctx, projID, console.APIKeyCursor{Page: 1, Limit: 1000})
-		require.NoError(t, err)
-
-		var updated int
-		for _, key := range keyPage.APIKeys {
-			if key.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, maxUpdates, updated)
-	}
-	test(t, prepare, migrator.MigrateAPIKeysLimited, check, &migrator.Config{
-		MaxUpdates: maxUpdates,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateBucketMetainfosSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateBucketMetainfos, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateBucketMetainfosLimitedSelectNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateBucketMetainfosLimited, check, &migrator.Config{
-		MaxUpdates: 1,
-	})
-}
-
-// Test no rows to update returns no error.
-func TestMigrateBucketMetainfosUpdateNoRows(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	bName := "test1"
-	var projID uuid.UUID
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-		projID = proj.ID
-
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        testrand.UUID(),
-			Name:      "test1",
-			ProjectID: projID,
-			UserAgent: []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		b, err := db.Buckets().GetBucket(ctx, []byte(bName), projID)
-		require.NoError(t, err)
-		require.NotNil(t, b.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateBucketMetainfos, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test select offset beyond final row.
-// With only one row, selecting with an offset of 1 will return 0 rows.
-// Test that this is accounted for and updates the row correctly.
-func TestMigrateBucketMetainfosSelectOffsetBeyondRowCount(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var projID uuid.UUID
-	bucket := []byte("test")
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		prj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-		projID = prj.ID
-
-		id := testrand.UUID()
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        testrand.UUID(),
-			Name:      string(bucket),
-			ProjectID: projID,
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		b, err := db.Buckets().GetBucket(ctx, bucket, projID)
-		require.NoError(t, err)
-		require.Nil(t, b.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateBucketMetainfos, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test user_agent field is updated correctly.
-func TestMigrateBucketMetainfos(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	var n int
-	var projID uuid.UUID
-	zeroedUUID := uuid.UUID{}.Bytes()
-	require.NotNil(t, zeroedUUID)
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-		})
-		require.NoError(t, err)
-		projID = proj.ID
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        id,
-			Name:      "test0",
-			ProjectID: projID,
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-
-		// insert another matching user_agent and partner id
-		id = testrand.UUID()
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        id,
-			Name:      "test1",
-			ProjectID: projID,
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-
-		// insert an entry whose user_agent does not match partner_id
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        testrand.UUID(),
-			Name:      "test2",
-			ProjectID: projID,
-			PartnerID: testrand.UUID(),
-			UserAgent: []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		list, err := db.Buckets().ListBuckets(ctx, projID, storj.BucketListOptions{Direction: storj.Forward}, macaroon.AllowedBuckets{All: true})
-		require.NoError(t, err)
-
-		var updated int
-		for _, b := range list.Items {
-			if b.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, n, updated)
-		n = 0
-	}
-
-	test(t, prepare, migrator.MigrateBucketMetainfos, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test limited number of user_agent fields are updated correctly.
-func TestMigrateBucketMetainfosLimited(t *testing.T) {
-	t.Parallel()
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	testID := testrand.UUID()
-	zeroedUUID := uuid.UUID{}.Bytes()
-	require.NotNil(t, zeroedUUID)
-	var projID uuid.UUID
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		proj, err := db.Console().Projects().Insert(ctx, &console.Project{
-			Name:        "test",
-			Description: "test",
-			OwnerID:     testrand.UUID(),
-			UserAgent:   []byte("teststorj"),
-		})
-		require.NoError(t, err)
-
-		projID = proj.ID
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        testID,
-			Name:      "test0",
-			ProjectID: projID,
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-
-		// insert another matching user_agent and partner id
-		id = testrand.UUID()
-		_, err = db.Buckets().CreateBucket(ctx, storj.Bucket{
-			ID:        testrand.UUID(),
-			Name:      "test1",
-			ProjectID: projID,
-			PartnerID: id,
-			UserAgent: id.Bytes(),
-		})
-		require.NoError(t, err)
-	}
-
-	maxUpdates := 1
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		list, err := db.Buckets().ListBuckets(ctx, projID, storj.BucketListOptions{Direction: storj.Forward}, macaroon.AllowedBuckets{All: true})
-		require.NoError(t, err)
-
-		var updated int
-		for _, b := range list.Items {
-			if b.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, maxUpdates, updated)
-	}
-	test(t, prepare, migrator.MigrateBucketMetainfosLimited, check, &migrator.Config{
-		MaxUpdates: maxUpdates,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateValueAttributionsSelectNoRows(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateValueAttributions, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test no entries in table doesn't error.
-func TestMigrateValueAttributionsLimitedSelectNoRows(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {}
-	test(t, prepare, migrator.MigrateValueAttributionsLimited, check, &migrator.Config{
-		MaxUpdates: 1,
-	})
-}
-
-// Test no rows to update returns no error.
-func TestMigrateValueAttributionsUpdateNoRows(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	partnerID := testrand.UUID()
-	ua := []byte("test")
-	projID := testrand.UUID()
-	bName := []byte("test")
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-
-		_, err := db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  projID,
-			PartnerID:  partnerID,
-			BucketName: bName,
-			UserAgent:  ua,
-		})
-		require.NoError(t, err)
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		att, err := db.Attribution().Get(ctx, projID, bName)
-		require.NoError(t, err)
-		require.Equal(t, partnerID, att.PartnerID)
-		require.Equal(t, ua, att.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateValueAttributions, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test select offset beyond final row.
-// With only one row, selecting with an offset of 1 will return 0 rows.
-// Test that this is accounted for and updates the row correctly.
-func TestMigrateValueAttributionsSelectOffsetBeyondRowCount(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	projID := testrand.UUID()
-	bucket := []byte("test")
-	id := testrand.UUID()
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		_, err := db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  projID,
-			PartnerID:  id,
-			BucketName: bucket,
-			UserAgent:  id.Bytes(),
-		})
-		require.NoError(t, err)
-	}
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		att, err := db.Attribution().Get(ctx, projID, bucket)
-		require.NoError(t, err)
-		require.Nil(t, att.UserAgent)
-	}
-	test(t, prepare, migrator.MigrateValueAttributions, check, &migrator.Config{
-		Limit: 8,
-	})
-}
-
-// Test user_agent field is updated correctly.
-func TestMigrateValueAttributions(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	type info struct {
-		bucket  []byte
-		project uuid.UUID
-	}
-
-	var n int
-	zeroedUUID := uuid.UUID{}.Bytes()
-	require.NotNil(t, zeroedUUID)
-	var infos []info
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-
-		// insert matching user_agent and partner id
-		id := testrand.UUID()
-		b := []byte("test0")
-		infos = append(infos, info{b, id})
-		_, err := db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  id,
-			PartnerID:  id,
-			BucketName: b,
-			UserAgent:  id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-
-		// insert another matching user_agent and partner id
-		id = testrand.UUID()
-		infos = append(infos, info{b, id})
-		_, err = db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  id,
-			PartnerID:  id,
-			BucketName: b,
-			UserAgent:  id.Bytes(),
-		})
-		require.NoError(t, err)
-		n++
-
-		// insert an entry whose user_agent does not match partner_id
-		id = testrand.UUID()
-		_, err = db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  id,
-			PartnerID:  id,
-			BucketName: b,
-			UserAgent:  []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		var updated int
-		for _, in := range infos {
-			att, err := db.Attribution().Get(ctx, in.project, in.bucket)
-			require.NoError(t, err)
-			if att.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, n, updated)
-		n = 0
-		// clear infos for the subsequent CRDB test
-		infos = []info{}
-	}
-
-	test(t, prepare, migrator.MigrateValueAttributions, check, &migrator.Config{
-		Limit: 1,
-	})
-}
-
-// Test limited number of user_agent fields are updated correctly.
-func TestMigrateValueAttributionsLimited(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	type info struct {
-		bucket  []byte
-		project uuid.UUID
-	}
-
-	zeroedUUID := uuid.UUID{}.Bytes()
-	require.NotNil(t, zeroedUUID)
-	var infos []info
-	prepare := func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB) {
-		// insert with matching user agent and partner id
-		id := testrand.UUID()
-		b := []byte("test0")
-		infos = append(infos, info{b, id})
-		_, err := db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  id,
-			PartnerID:  id,
-			BucketName: b,
-			UserAgent:  id.Bytes(),
-		})
-		require.NoError(t, err)
-
-		// insert another matching user agent and partner id
-		id = testrand.UUID()
-		infos = append(infos, info{b, id})
-		_, err = db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  id,
-			PartnerID:  id,
-			BucketName: b,
-			UserAgent:  id.Bytes(),
-		})
-		require.NoError(t, err)
-
-		// insert an entry whose user_agent does not match partner_id
-		id = testrand.UUID()
-		infos = append(infos, info{b, id})
-		_, err = db.Attribution().Insert(ctx, &attribution.Info{
-			ProjectID:  id,
-			PartnerID:  id,
-			BucketName: b,
-			UserAgent:  []byte("teststorj"),
-		})
-		require.NoError(t, err)
-	}
-
-	maxUpdates := 1
-
-	check := func(t *testing.T, ctx context.Context, db satellite.DB) {
-		var updated int
-		for _, in := range infos {
-			att, err := db.Attribution().Get(ctx, in.project, in.bucket)
-			require.NoError(t, err)
-			if att.UserAgent == nil {
-				updated++
-			}
-		}
-		require.Equal(t, maxUpdates, updated)
-
-		// clear infos for the subsequent CRDB test
-		infos = []info{}
-	}
-	test(t, prepare, migrator.MigrateValueAttributionsLimited, check, &migrator.Config{
-		MaxUpdates: maxUpdates,
-	})
-}
-
-func test(t *testing.T, prepare func(t *testing.T, ctx *testcontext.Context, rawDB *dbutil.TempDatabase, db satellite.DB),
-	migrate func(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config migrator.Config) (err error),
-	check func(t *testing.T, ctx context.Context, db satellite.DB), config *migrator.Config) {
-
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()
-
-	log := zaptest.NewLogger(t)
-
-	for _, satelliteDB := range satellitedbtest.Databases() {
-		satelliteDB := satelliteDB
-		t.Run(satelliteDB.Name, func(t *testing.T) {
-			schemaSuffix := satellitedbtest.SchemaSuffix()
-			schema := satellitedbtest.SchemaName(t.Name(), "category", 0, schemaSuffix)
-
-			tempDB, err := tempdb.OpenUnique(ctx, satelliteDB.MasterDB.URL, schema)
-			require.NoError(t, err)
-
-			db, err := satellitedbtest.CreateMasterDBOnTopOf(ctx, log, tempDB)
-			require.NoError(t, err)
-			defer ctx.Check(db.Close)
-
-			err = db.TestingMigrateToLatest(ctx)
-			require.NoError(t, err)
-
-			prepare(t, ctx, tempDB, db)
-
-			mConnStr := strings.Replace(tempDB.ConnStr, "cockroach", "postgres", 1)
-
-			conn, err := pgx.Connect(ctx, mConnStr)
-			require.NoError(t, err)
-
-			err = migrate(ctx, log, conn, *config)
-			require.NoError(t, err)
-
-			check(t, ctx, db)
-		})
-	}
-}
diff --git a/cmd/tools/nullify-bad-user-agents/migrations.go b/cmd/tools/nullify-bad-user-agents/migrations.go
deleted file mode 100644
index 44ac69b6e..000000000
--- a/cmd/tools/nullify-bad-user-agents/migrations.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright (C) 2021 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package main
-
-import (
-	"context"
-
-	pgx "github.com/jackc/pgx/v4"
-	"github.com/zeebo/errs"
-	"go.uber.org/zap"
-
-	"storj.io/private/dbutil/cockroachutil"
-)
-
-// MigrateTables runs the migration for each table.
-func MigrateTables(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	err = MigrateUsers(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating users: %w", err)
-	}
-	err = MigrateProjects(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating projects: %w", err)
-	}
-	err = MigrateAPIKeys(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating api_keys: %w", err)
-	}
-	err = MigrateBucketMetainfos(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating bucket_metainfos: %w", err)
-	}
-	err = MigrateValueAttributions(ctx, log, conn, config)
-	if err != nil {
-		return errs.New("error migrating value_attributions: %w", err)
-	}
-	return nil
-}
-
-// MigrateUsers sets the user_agent column to NULL where it matches partner_id.
-func MigrateUsers(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	// We select the next id then use limit as an offset which actually gives us limit+1 rows.
-	offset := config.Limit - 1
-
-	startID := []byte{}
-	nextID := []byte{}
-	var total int
-	more := true
-
-	// select the next ID after startID and offset the result. We will update relevant rows from the range of startID to nextID.
-	_, err = conn.Prepare(ctx, "select-for-update-users", "SELECT id FROM users WHERE id > $1 ORDER BY id OFFSET $2 LIMIT 1")
-	if err != nil {
-		return errs.New("could not prepare select query: %w", err)
-	}
-
-	// update range from startID to nextID. Return count of updated rows to log progress.
-	_, err = conn.Prepare(ctx, "update-limited-users", `
-		WITH updated as (
-			UPDATE users
-			SET user_agent = NULL
-			WHERE users.id > $1 AND users.id <= $2
-			AND user_agent = partner_id
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated;
-	`)
-	if err != nil {
-		return errs.New("could not prepare update statement: %w", err)
-	}
-
-	for {
-		row := conn.QueryRow(ctx, "select-for-update-users", startID, offset)
-		err = row.Scan(&nextID)
-		if err != nil {
-			if errs.Is(err, pgx.ErrNoRows) {
-				more = false
-			} else {
-				return errs.New("unable to select row for update from users: %w", err)
-			}
-		}
-
-		var updated int
-		for {
-			var row pgx.Row
-			if more {
-				row = conn.QueryRow(ctx, "update-limited-users", startID, nextID)
-			} else {
-				// if !more then the select statement reached the end of the table. Update to the end of the table.
-				row = conn.QueryRow(ctx, `
-					WITH updated as (
-						UPDATE users
-						SET user_agent = NULL
-						WHERE users.id > $1
-						AND user_agent = partner_id
-						RETURNING 1
-					)
-					SELECT count(*)
-					FROM updated;
-				`, startID,
-				)
-			}
-			err := row.Scan(&updated)
-			if err != nil {
-				if cockroachutil.NeedsRetry(err) {
-					continue
-				} else if errs.Is(err, pgx.ErrNoRows) {
-					break
-				}
-				return errs.New("updating users: %w", err)
-			}
-			break
-		}
-
-		total += updated
-		if !more {
-			log.Info("batch update complete", zap.Int("rows updated", updated))
-			break
-		}
-		log.Info("batch update complete", zap.Int("rows updated", updated), zap.Binary("last id", nextID))
-		startID = nextID
-	}
-	log.Info("users migration complete", zap.Int("total rows updated", total))
-	return nil
-}
-
-// MigrateProjects sets the user_agent column to NULL where it matches partner_id.
-func MigrateProjects(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	// We select the next id then use limit as an offset which actually gives us limit+1 rows.
-	offset := config.Limit - 1
-
-	startID := []byte{}
-	nextID := []byte{}
-	var total int
-	more := true
-
-	// select the next ID after startID and offset the result. We will update relevant rows from the range of startID to nextID.
-	_, err = conn.Prepare(ctx, "select-for-update-projects", "SELECT id FROM projects WHERE id > $1 ORDER BY id OFFSET $2 LIMIT 1")
-	if err != nil {
-		return errs.New("could not prepare select query: %w", err)
-	}
-
-	// update range from startID to nextID. Return count of updated rows to log progress.
-	_, err = conn.Prepare(ctx, "update-limited-projects", `
-		WITH updated as (
-			UPDATE projects
-			SET user_agent = NULL
-			WHERE projects.id > $1 AND projects.id <= $2
-			AND user_agent = partner_id
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated;
-	`)
-	if err != nil {
-		return errs.New("could not prepare update statement: %w", err)
-	}
-
-	for {
-		row := conn.QueryRow(ctx, "select-for-update-projects", startID, offset)
-		err = row.Scan(&nextID)
-		if err != nil {
-			if errs.Is(err, pgx.ErrNoRows) {
-				more = false
-			} else {
-				return errs.New("unable to select row for update from projects: %w", err)
-			}
-		}
-
-		var updated int
-		for {
-			var row pgx.Row
-			if more {
-				row = conn.QueryRow(ctx, "update-limited-projects", startID, nextID)
-			} else {
-				// if !more then the select statement reached the end of the table. Update to the end of the table.
-				row = conn.QueryRow(ctx, `
-					WITH updated as (
-						UPDATE projects
-						SET user_agent = NULL
-						WHERE projects.id > $1
-						AND user_agent = partner_id
-						RETURNING 1
-					)
-					SELECT count(*)
-					FROM updated;
-				`, startID,
-				)
-			}
-
-			err := row.Scan(&updated)
-			if err != nil {
-				if cockroachutil.NeedsRetry(err) {
-					continue
-				} else if errs.Is(err, pgx.ErrNoRows) {
-					break
-				}
-				return errs.New("updating projects: %w", err)
-			}
-			break
-		}
-
-		total += updated
-		if !more {
-			log.Info("batch update complete", zap.Int("rows updated", updated))
-			break
-		}
-		log.Info("batch update complete", zap.Int("rows updated", updated), zap.Binary("last id", nextID))
-		startID = nextID
-	}
-	log.Info("projects migration complete", zap.Int("total rows updated", total))
-	return nil
-}
-
-// MigrateAPIKeys sets the user_agent column to NULL where it matches partner_id.
-func MigrateAPIKeys(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	// We select the next id then use limit as an offset which actually gives us limit+1 rows.
-	offset := config.Limit - 1
-
-	startID := []byte{}
-	nextID := []byte{}
-	var total int
-	more := true
-
-	// select the next ID after startID and offset the result. We will update relevant rows from the range of startID to nextID.
-	_, err = conn.Prepare(ctx, "select-for-update-api-keys", "SELECT id FROM api_keys WHERE id > $1 ORDER BY id OFFSET $2 LIMIT 1")
-	if err != nil {
-		return errs.New("could not prepare select query: %w", err)
-	}
-
-	// update range from startID to nextID. Return count of updated rows to log progress.
-	_, err = conn.Prepare(ctx, "update-limited-api-keys", `
-		WITH updated as (
-			UPDATE api_keys
-			SET user_agent = NULL
-			WHERE api_keys.id > $1 AND api_keys.id <= $2
-			AND user_agent = partner_id
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated;
-	`)
-	if err != nil {
-		return errs.New("could not prepare update statement: %w", err)
-	}
-
-	for {
-		row := conn.QueryRow(ctx, "select-for-update-api-keys", startID, offset)
-		err = row.Scan(&nextID)
-		if err != nil {
-			if errs.Is(err, pgx.ErrNoRows) {
-				more = false
-			} else {
-				return errs.New("unable to select row for update from api_keys: %w", err)
-			}
-		}
-		var updated int
-		for {
-			var row pgx.Row
-			if more {
-				row = conn.QueryRow(ctx, "update-limited-api-keys", startID, nextID)
-			} else {
-				// if !more then the select statement reached the end of the table. Update to the end of the table.
-				row = conn.QueryRow(ctx, `
-					WITH updated as (
-						UPDATE api_keys
-						SET user_agent = NULL
-						WHERE api_keys.id > $1
-						AND user_agent = partner_id
-						RETURNING 1
-					)
-					SELECT count(*)
-					FROM updated;
-				`, startID,
-				)
-			}
-			err := row.Scan(&updated)
-			if err != nil {
-				if cockroachutil.NeedsRetry(err) {
-					continue
-				} else if errs.Is(err, pgx.ErrNoRows) {
-					break
-				}
-				return errs.New("updating api_keys: %w", err)
-			}
-			break
-		}
-
-		total += updated
-		if !more {
-			log.Info("batch update complete", zap.Int("rows updated", updated))
-			break
-		}
-		log.Info("batch update complete", zap.Int("rows updated", updated), zap.Binary("last id", nextID))
-		startID = nextID
-	}
-	log.Info("api_keys migration complete", zap.Int("total rows updated", total))
-	return nil
-}
-
-// MigrateBucketMetainfos sets the user_agent column to NULL where it matches partner_id.
-func MigrateBucketMetainfos(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	// We select the next id then use limit as an offset which actually gives us limit+1 rows.
-	offset := config.Limit - 1
-
-	startID := []byte{}
-	nextID := []byte{}
-	var total int
-	more := true
-
-	// select the next ID after startID and offset the result. We will update relevant rows from the range of startID to nextID.
-	_, err = conn.Prepare(ctx, "select-for-update-bucket-metainfos", "SELECT id FROM bucket_metainfos WHERE id > $1 ORDER BY id OFFSET $2 LIMIT 1")
-	if err != nil {
-		return errs.New("could not prepare select query: %w", err)
-	}
-
-	// update range from startID to nextID. Return count of updated rows to log progress.
-	_, err = conn.Prepare(ctx, "update-limited-bucket-metainfos", `
-		WITH updated as (
-			UPDATE bucket_metainfos
-			SET user_agent = NULL
-			WHERE bucket_metainfos.id > $1 AND bucket_metainfos.id <= $2
-			AND user_agent = partner_id
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated;
-	`)
-	if err != nil {
-		return errs.New("could not prepare update statement: %w", err)
-	}
-
-	for {
-		row := conn.QueryRow(ctx, "select-for-update-bucket-metainfos", startID, offset)
-		err = row.Scan(&nextID)
-		if err != nil {
-			if errs.Is(err, pgx.ErrNoRows) {
-				more = false
-			} else {
-				return errs.New("unable to select row for update from bucket_metainfos: %w", err)
-			}
-		}
-		var updated int
-		for {
-			var row pgx.Row
-			if more {
-				row = conn.QueryRow(ctx, "update-limited-bucket-metainfos", startID, nextID)
-			} else {
-				// if !more then the select statement reached the end of the table. Update to the end of the table.
-				row = conn.QueryRow(ctx, `
-					WITH updated as (
-						UPDATE bucket_metainfos
-						SET user_agent = NULL
-						WHERE bucket_metainfos.id > $1
-						AND user_agent = partner_id
-						RETURNING 1
-					)
-					SELECT count(*)
-					FROM updated;
-				`, startID,
-				)
-			}
-			err := row.Scan(&updated)
-			if err != nil {
-				if cockroachutil.NeedsRetry(err) {
-					continue
-				} else if errs.Is(err, pgx.ErrNoRows) {
-					break
-				}
-				return errs.New("updating bucket_metainfos: %w", err)
-			}
-			break
-		}
-
-		total += updated
-		if !more {
-			log.Info("batch update complete", zap.Int("rows updated", updated))
-			break
-		}
-		log.Info("batch update complete", zap.Int("rows updated", updated), zap.Binary("last id", nextID))
-		startID = nextID
-	}
-	log.Info("bucket_metainfos migration complete", zap.Int("total rows updated", total))
-	return nil
-}
-
-// MigrateValueAttributions sets the user_agent column to NULL where it matches partner_id.
-func MigrateValueAttributions(ctx context.Context, log *zap.Logger, conn *pgx.Conn, config Config) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	// We select the next id then use limit as an offset which actually gives us limit+1 rows.
-	offset := config.Limit - 1
-
-	startProjectID := []byte{}
-	startBucket := []byte{}
-	nextProjectID := []byte{}
-	nextBucket := []byte{}
-	var total int
-	more := true
-
-	// select the next ID after startID and offset the result. We will update relevant rows from the range of startID to nextID.
-	_, err = conn.Prepare(ctx, "select-for-update-value-attributions", `
-		SELECT project_id, bucket_name
-		FROM value_attributions
-		WHERE project_id > $1
-		OR (
-			project_id = $1 AND bucket_name > $2
-		)
-		ORDER BY project_id, bucket_name
-		OFFSET $3
-		LIMIT 1
-	`)
-	if err != nil {
-		return errs.New("could not prepare select query: %w", err)
-	}
-
-	// update range from startID to nextID. Return count of updated rows to log progress.
-	_, err = conn.Prepare(ctx, "update-limited-value-attributions", `
-		WITH updated as (
-			UPDATE value_attributions
-			SET user_agent = NULL
-			WHERE user_agent = partner_id
-			AND (
-				value_attributions.project_id > $1
-				OR (
-					value_attributions.project_id = $1 AND value_attributions.bucket_name > $3
-				)
-			)
-			AND (
-				value_attributions.project_id < $2
-				OR (
-					value_attributions.project_id = $2 AND value_attributions.bucket_name <= $4
-				)
-			)
-			RETURNING 1
-		)
-		SELECT count(*)
-		FROM updated;
-	`)
-	if err != nil {
-		return errs.New("could not prepare update statement: %w", err)
-	}
-
-	for {
-		row := conn.QueryRow(ctx, "select-for-update-value-attributions", startProjectID, startBucket, offset)
-		err = row.Scan(&nextProjectID, &nextBucket)
-		if err != nil {
-			if errs.Is(err, pgx.ErrNoRows) {
-				more = false
-			} else {
-				return errs.New("unable to select row for update from value_attributions: %w", err)
-			}
-		}
-		var updated int
-		for {
-			var row pgx.Row
-			if more {
-				row = conn.QueryRow(ctx, "update-limited-value-attributions", startProjectID, nextProjectID, startBucket, nextBucket)
-			} else {
-				// if !more then the select statement reached the end of the table. Update to the end of the table.
-				row = conn.QueryRow(ctx, `
-					WITH updated as (
-						UPDATE value_attributions
-						SET user_agent = NULL
-						WHERE user_agent = partner_id
-						AND (
-							value_attributions.project_id > $1
-							OR (
-								value_attributions.project_id = $1 AND value_attributions.bucket_name > $2
-							)
-						)
-						RETURNING 1
-					)
-					SELECT count(*)
-					FROM updated;
-				`, startProjectID, startBucket,
-				)
-			}
-			err := row.Scan(&updated)
-			if err != nil {
-				if cockroachutil.NeedsRetry(err) {
-					continue
-				} else if errs.Is(err, pgx.ErrNoRows) {
-					break
-				}
-				return errs.New("updating value_attributions: %w", err)
-			}
-			break
-		}
-
-		total += updated
-		if !more {
-			log.Info("batch update complete", zap.Int("rows updated", updated))
-			break
-		}
-		log.Info("batch update complete", zap.Int("rows updated", updated), zap.Binary("last project id", nextProjectID), zap.String("last bucket name", string(nextBucket)))
-		startProjectID = nextProjectID
-		startBucket = nextBucket
-	}
-	log.Info("value_attributions migration complete", zap.Int("total rows updated", total))
-	return nil
-}