// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storagenodedbtest_test

import (
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/internal/teststorj"
	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/orders"
	"storj.io/storj/storagenode/storagenodedb"
	"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)
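
// TestDatabase checks that a storage node database can be opened and its
// tables created by the shared test harness.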
func TestDatabase(t *testing.T) {
	storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
	})
}
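
// TestBandwidthRollup adds bandwidth usage for several test satellites, rolls
// the hourly entries up, and verifies that Summary and SummaryBySatellite
// report the same totals before and after the rollup.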
func TestBandwidthRollup(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	db, err := storagenodedb.NewInMemory(log, ctx.Dir("storage"))
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Check(db.Close)

	t.Run("Sqlite", func(t *testing.T) {
		err := db.CreateTables()
		if err != nil {
			t.Fatal(err)
		}
		testID1 := teststorj.NodeIDFromString("testId1")
		testID2 := teststorj.NodeIDFromString("testId2")
		testID3 := teststorj.NodeIDFromString("testId3")

		// Create data from two hours ago so the rollup has something to aggregate.
		err = db.Bandwidth().Add(ctx, testID1, pb.PieceAction_PUT, 2, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)
		err = db.Bandwidth().Add(ctx, testID1, pb.PieceAction_GET, 3, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)
		err = db.Bandwidth().Add(ctx, testID1, pb.PieceAction_GET_AUDIT, 4, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)

		err = db.Bandwidth().Add(ctx, testID2, pb.PieceAction_PUT, 5, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)
		err = db.Bandwidth().Add(ctx, testID2, pb.PieceAction_GET, 6, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)
		err = db.Bandwidth().Add(ctx, testID2, pb.PieceAction_GET_AUDIT, 7, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)

		usage, err := db.Bandwidth().Summary(ctx, time.Now().Add(time.Hour*-48), time.Now())
		require.NoError(t, err)
		require.Equal(t, int64(27), usage.Total())

		err = db.Bandwidth().Rollup(ctx)
		require.NoError(t, err)

		// After rollup, the totals should still be the same.
		usage, err = db.Bandwidth().Summary(ctx, time.Now().Add(time.Hour*-48), time.Now())
		require.NoError(t, err)
		require.Equal(t, int64(27), usage.Total())

		// Add more data to test that Summary calculates the bandwidth across both tables.
		err = db.Bandwidth().Add(ctx, testID3, pb.PieceAction_PUT, 8, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)
		err = db.Bandwidth().Add(ctx, testID3, pb.PieceAction_GET, 9, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)
		err = db.Bandwidth().Add(ctx, testID3, pb.PieceAction_GET_AUDIT, 10, time.Now().Add(time.Hour*-2))
		require.NoError(t, err)

		usage, err = db.Bandwidth().Summary(ctx, time.Now().Add(time.Hour*-48), time.Now())
		require.NoError(t, err)
		require.Equal(t, int64(54), usage.Total())

		usageBySatellite, err := db.Bandwidth().SummaryBySatellite(ctx, time.Now().Add(time.Hour*-48), time.Now())
		require.NoError(t, err)
		for k := range usageBySatellite {
			switch k {
			case testID1:
				require.Equal(t, int64(9), usageBySatellite[testID1].Total())
			case testID2:
				require.Equal(t, int64(18), usageBySatellite[testID2].Total())
			case testID3:
				require.Equal(t, int64(27), usageBySatellite[testID3].Total())
			default:
				require.Fail(t, "found unexpected satellite usage")
			}
		}
	})
}
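
// TestFileConcurrency runs the concurrent order test against an on-disk database.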
func TestFileConcurrency(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	db, err := storagenodedb.New(log, storagenodedb.Config{
		Pieces:   ctx.Dir("storage"),
		Info2:    ctx.Dir("storage") + "/info.db",
		Kademlia: ctx.Dir("storage") + "/kademlia",
	})
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Check(db.Close)

	testConcurrency(t, ctx, db)
}
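
// TestInMemoryConcurrency runs the concurrent order test against an in-memory database.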
func TestInMemoryConcurrency(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	log := zaptest.NewLogger(t)

	db, err := storagenodedb.NewInMemory(log, ctx.Dir("storage"))
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Check(db.Close)

	testConcurrency(t, ctx, db)
}
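
// testConcurrency creates a batch of orders, enqueues them from concurrent
// goroutines, and verifies that every one of them ends up in the unsent list.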
func testConcurrency(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB) {
	t.Run("Sqlite", func(t *testing.T) {
		runtime.GOMAXPROCS(2)

		err := db.CreateTables()
		if err != nil {
			t.Fatal(err)
		}

		ordersMap := make(map[string]orders.Info)
		err = createOrders(t, ctx, ordersMap, 1000)
		require.NoError(t, err)

		err = insertOrders(t, ctx, db, ordersMap)
		require.NoError(t, err)

		err = verifyOrders(t, ctx, db, ordersMap)
		require.NoError(t, err)
	})
}
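
// insertOrders enqueues every order in ordersMap from its own goroutine and
// waits for all of the inserts to finish.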
func insertOrders(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB, ordersMap map[string]orders.Info) (err error) {
	var wg sync.WaitGroup
	for _, order := range ordersMap {
		wg.Add(1)
		o := order // copy the loop variable so each goroutine enqueues its own order
		go insertOrder(t, ctx, db, &wg, &o)
	}
	wg.Wait()
	return nil
}
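
// insertOrder enqueues a single order and reports any failure through t.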
func insertOrder(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB, wg *sync.WaitGroup, order *orders.Info) {
	defer wg.Done()
	err := db.Orders().Enqueue(ctx, order)
	require.NoError(t, err)
}
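
// verifyOrders checks that every order in the test data appears in the
// database's list of unsent orders.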
func verifyOrders(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB, orders map[string]orders.Info) (err error) {
	dbOrders, err := db.Orders().ListUnsent(ctx, 10000)
	require.NoError(t, err)
	found := 0
	for _, order := range orders {
		for _, dbOrder := range dbOrders {
			if order.Order.SerialNumber == dbOrder.Order.SerialNumber {
				found++
			}
		}
	}
	require.Equal(t, len(orders), found, "Number found must equal the length of the test data")
	return nil
}
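
// createOrders fills the orders map with count signed orders keyed by random UUIDs.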
func createOrders(t *testing.T, ctx *testcontext.Context, orders map[string]orders.Info, count int) (err error) {
	for i := 0; i < count; i++ {
		key, err := uuid.New()
		if err != nil {
			return err
		}
		order := createOrder(t, ctx)
		orders[key.String()] = *order
	}
	return nil
}
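
// createOrder builds an order limit signed by a test satellite identity and a
// matching order signed with a freshly generated uplink piece key.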
func createOrder(t *testing.T, ctx *testcontext.Context) (info *orders.Info) {
	storageNodeIdentity := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
	satelliteIdentity := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())

	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
	require.NoError(t, err)

	piece := testrand.PieceID()
	serialNumber := testrand.SerialNumber()
	expiration := time.Now()

	limit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satelliteIdentity), &pb.OrderLimit{
		SerialNumber:    serialNumber,
		SatelliteId:     satelliteIdentity.ID,
		UplinkPublicKey: piecePublicKey,
		StorageNodeId:   storageNodeIdentity.ID,
		PieceId:         piece,
		Limit:           100,
		Action:          pb.PieceAction_GET,
		PieceExpiration: expiration,
		OrderExpiration: expiration,
	})
	require.NoError(t, err)

	order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
		SerialNumber: serialNumber,
		Amount:       50,
	})
	require.NoError(t, err)

	return &orders.Info{
		Limit: limit,
		Order: order,
	}
}