storj/storagenode/storagenodedb/storagenodedbtest/run_test.go
Simon Guindon a2b1e9fa95
storagenode/storagenodedb: refactor both data access objects and migrations to support multiple DB connections (#3057)
* Split the info.db database into multiple DBs using the SQLite Backup API (a sketch follows this list).

* Remove location. The previous refactor assumed we would need it, but we don't.

* Added VACUUM to reclaim space after splitting storage node databases.

* Added unique names to SQLite3 connection hooks to fix testplanet.

* Moving DB closing to the migration step.

* Removing the closing of the versions DB. It's already getting closed.

* Swapping the database connection references on reconnect.

* Moved sqlite closing logic away from the boltdb closing logic.

* Remove certificate and vouchers from DB split migration.

* Removed vouchers and bumped up the migration version.

* Use same constructor in tests for storage node databases.

* Adding a method to access the underlying SQL database connections, and cleanup.

* Adding logging for migration diagnostics.

* Moved the database-closing logic in the migration step to minimize disk usage.

* Cleaning up error handling.

* Fix missing copyright.

* Fix linting error.

* Add test for migration 21 (#3012)

* Refactoring migration code into a nicer-to-use object.

* Fixing broken migration test.

* Removed code that is no longer needed now that we close DBs.

* Fixed bug where an invalid database path was being opened.

* Fixed linting errors.

* Renamed VersionsDB to LegacyInfoDB and refactored DB lookup keys.

* Fix migration test. NOTE: this change does not address the new tables satellites and satellite_exit_progress.

* Removing the v22 migration to move it into its own PR.

* Refactored schema, rebind and configure functions to be re-useable.

* Renamed LegacyInfoDB to DeprecatedInfoDB.

* Cleaned up closeDatabase function.

* Renamed storageNodeSQLDB to migratableDB.

* Switched from using errs.Combine() to errs.Group in closeDatabases func.

* Removed constructors from storage node data access objects.

* Reformatted usage of const.

* Fixed broken test snapshots.

* Fixed linting error.
2019-09-18 12:17:28 -04:00
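
The headline items above (splitting info.db with the SQLite online backup API, giving each SQLite3 connection hook a unique name, and running VACUUM to reclaim space) can be illustrated with a short, self-contained sketch. This is not the migration code from storagenode/storagenodedb; it assumes github.com/mattn/go-sqlite3 as the driver, and the file names, driver names, and dropped table below are hypothetical.

// Sketch only: copy info.db into a second database file with the SQLite
// online backup API, drop what does not belong there, and VACUUM so the
// freed pages are returned to the filesystem. Paths, driver names, and the
// dropped table are illustrative assumptions, not the real storagenode schema.
package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	var srcConn, destConn *sqlite3.SQLiteConn

	// The backup API works on raw SQLite connections, which we capture via
	// ConnectHook. Every registered driver needs a unique name, otherwise
	// sql.Register panics.
	sql.Register("sqlite3_split_src", &sqlite3.SQLiteDriver{
		ConnectHook: func(c *sqlite3.SQLiteConn) error { srcConn = c; return nil },
	})
	sql.Register("sqlite3_split_dest", &sqlite3.SQLiteDriver{
		ConnectHook: func(c *sqlite3.SQLiteConn) error { destConn = c; return nil },
	})

	src, err := sql.Open("sqlite3_split_src", "info.db")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dest, err := sql.Open("sqlite3_split_dest", "orders.db")
	if err != nil {
		log.Fatal(err)
	}
	defer dest.Close()

	// sql.Open is lazy; Ping forces a real connection so the hooks run.
	if err := src.Ping(); err != nil {
		log.Fatal(err)
	}
	if err := dest.Ping(); err != nil {
		log.Fatal(err)
	}

	// Copy every page of the source database into the destination file.
	backup, err := destConn.Backup("main", srcConn, "main")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := backup.Step(-1); err != nil {
		log.Fatal(err)
	}
	if err := backup.Finish(); err != nil {
		log.Fatal(err)
	}

	// Drop the tables that stay behind in the other databases (hypothetical
	// table name here), then VACUUM so the file actually shrinks.
	if _, err := dest.Exec(`DROP TABLE IF EXISTS some_unrelated_table`); err != nil {
		log.Fatal(err)
	}
	if _, err := dest.Exec(`VACUUM`); err != nil {
		log.Fatal(err)
	}
}

Because sql.Register panics on duplicate driver names, hooks with colliding names break when several node databases are registered in one process, which appears to be what the "unique names to SQLite3 connection hooks to fix testplanet" bullet addresses.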

178 lines
4.5 KiB
Go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storagenodedbtest_test

import (
	"path/filepath"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/signing"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/orders"
	"storj.io/storj/storagenode/storagenodedb"
	"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)

func TestDatabase(t *testing.T) {
	storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
	})
}

func TestFileConcurrency(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	db, err := storagenodedb.New(log, storagenodedb.Config{
		Pieces:   ctx.Dir("storage"),
		Info2:    filepath.Join(ctx.Dir("storage"), "info.db"),
		Kademlia: filepath.Join(ctx.Dir("storage"), "kademlia"),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Check(db.Close)

	testConcurrency(t, ctx, db)
}

func TestInMemoryConcurrency(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	storageDir := ctx.Dir("storage")
	cfg := storagenodedb.Config{
		Pieces:   storageDir,
		Storage:  storageDir,
		Info:     filepath.Join(storageDir, "piecestore.db"),
		Info2:    filepath.Join(storageDir, "info.db"),
		Kademlia: filepath.Join(storageDir, "kademlia"),
	}

	db, err := storagenodedb.New(log, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Check(db.Close)

	testConcurrency(t, ctx, db)
}

func testConcurrency(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB) {
	t.Run("Sqlite", func(t *testing.T) {
		runtime.GOMAXPROCS(2)

		err := db.CreateTables()
		if err != nil {
			t.Fatal(err)
		}

		ordersMap := make(map[string]orders.Info)
		err = createOrders(t, ctx, ordersMap, 1000)
		require.NoError(t, err)

		err = insertOrders(t, ctx, db, ordersMap)
		require.NoError(t, err)

		err = verifyOrders(t, ctx, db, ordersMap)
		require.NoError(t, err)
	})
}

func insertOrders(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB, ordersMap map[string]orders.Info) (err error) {
	var wg sync.WaitGroup
	for _, order := range ordersMap {
		wg.Add(1)
		o := order // copy, so each goroutine gets its own value
		go insertOrder(t, ctx, db, &wg, &o)
	}
	wg.Wait()
	return nil
}

func insertOrder(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB, wg *sync.WaitGroup, order *orders.Info) {
	defer wg.Done()
	err := db.Orders().Enqueue(ctx, order)
	require.NoError(t, err)
}

func verifyOrders(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB, orders map[string]orders.Info) (err error) {
	dbOrders, err := db.Orders().ListUnsent(ctx, 10000)
	require.NoError(t, err)

	found := 0
	for _, order := range orders {
		for _, dbOrder := range dbOrders {
			if order.Order.SerialNumber == dbOrder.Order.SerialNumber {
				found++
			}
		}
	}
	require.Equal(t, len(orders), found, "Number found must equal the length of the test data")
	return nil
}

func createOrders(t *testing.T, ctx *testcontext.Context, orders map[string]orders.Info, count int) (err error) {
	for i := 0; i < count; i++ {
		key, err := uuid.New()
		if err != nil {
			return err
		}
		order := createOrder(t, ctx)
		orders[key.String()] = *order
	}
	return nil
}

func createOrder(t *testing.T, ctx *testcontext.Context) (info *orders.Info) {
	storageNodeIdentity := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
	satelliteIdentity := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())

	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
	require.NoError(t, err)

	piece := testrand.PieceID()
	serialNumber := testrand.SerialNumber()
	expiration := time.Now()

	limit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satelliteIdentity), &pb.OrderLimit{
		SerialNumber:    serialNumber,
		SatelliteId:     satelliteIdentity.ID,
		UplinkPublicKey: piecePublicKey,
		StorageNodeId:   storageNodeIdentity.ID,
		PieceId:         piece,
		Limit:           100,
		Action:          pb.PieceAction_GET,
		PieceExpiration: expiration,
		OrderExpiration: expiration,
	})
	require.NoError(t, err)

	order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
		SerialNumber: serialNumber,
		Amount:       50,
	})
	require.NoError(t, err)

	return &orders.Info{
		Limit: limit,
		Order: order,
	}
}