storj/storagenode/orders/service_test.go
Jeff Wendling 91698207cf storagenode: live tracking of order window usage
This change accomplishes multiple things:

1. Instead of having a max in-flight time, which effectively
   imposes a minimum bandwidth for uploads and downloads,
   we keep track of which windows have active requests
   happening in them (see the first sketch below).

2. We no longer double-check whether the order is too old when
   we save it: by then, it's too late. A malicious uplink
   could submit orders outside of the grace window and
   receive all the data, while the node simply would not
   commit them, so the uplink gets free traffic. Because the
   endpoints also check whether the order is too old, this
   would be a very tight race that depends on knowledge of
   the node's system clock, but it's best for the race not to
   exist at all. Instead, we piggyback on the in-flight
   tracking: we do the check when we start to handle the
   order, and commit at the end.

3. Change the functions that send orders and list unsent
   orders to accept a time at which that operation is
   happening. This way, in tests, we can pretend we're
   listing or sending far into the future after the windows
   are available to send, rather than exposing test functions
   to modify internal state about the grace period to get
   the desired effect. This brings tests closer to actual
   usage in production.

4. Change the grace-period calculation for whether an order is
   allowed to be enqueued so that it only looks at the order
   creation time, rather than a computation involving the
   window it will fall into. This way, you can easily answer
   "will this order be accepted?" by asking "is it older
   than X?", where X is the grace period (see the second
   sketch below).

5. Increase the frequency at which we check for orders to send
   from once every hour to once every 5 minutes, since the
   windows already give us hour-long buffering. This decreases
   the maximum latency before an order is reported back to
   the satellite by 55 minutes.

Change-Id: Ie08b90d139d45ee89b82347e191a2f8db1b88036
2020-08-19 19:42:33 +00:00
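
A minimal sketch of the per-window in-flight tracking described in (1), using hypothetical names (windowUsage, begin, done, idle) rather than the actual storagenode code; locking and persistence are left out:

package main

import (
	"fmt"
	"time"
)

// windowUsage counts in-flight requests per hour-long window, so a window is
// only treated as ready to settle once nothing is active in it anymore.
type windowUsage struct {
	active map[time.Time]int // window start (hour-truncated) -> in-flight count
}

func newWindowUsage() *windowUsage {
	return &windowUsage{active: make(map[time.Time]int)}
}

// begin records that a request using an order created at createdAt started.
func (w *windowUsage) begin(createdAt time.Time) {
	w.active[createdAt.Truncate(time.Hour)]++
}

// done records that such a request finished, forgetting the window once it is empty.
func (w *windowUsage) done(createdAt time.Time) {
	window := createdAt.Truncate(time.Hour)
	if w.active[window]--; w.active[window] <= 0 {
		delete(w.active, window)
	}
}

// idle reports whether the window containing t has no active requests.
func (w *windowUsage) idle(t time.Time) bool {
	return w.active[t.Truncate(time.Hour)] == 0
}

func main() {
	usage := newWindowUsage()
	createdAt := time.Now()

	usage.begin(createdAt)
	fmt.Println(usage.idle(createdAt)) // false: a request is still in flight
	usage.done(createdAt)
	fmt.Println(usage.idle(createdAt)) // true: the window can be settled
}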
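
And a minimal sketch of the grace-period rule from (4), again with a hypothetical helper (canEnqueue) rather than the actual storagenode code; the enqueue decision depends only on the order creation time:

package main

import (
	"fmt"
	"time"
)

// canEnqueue reports whether an order created at orderCreation is still within
// the grace period at time now, i.e. it answers "is it older than X?" with X
// being the grace period.
func canEnqueue(now, orderCreation time.Time, gracePeriod time.Duration) bool {
	return now.Sub(orderCreation) <= gracePeriod
}

func main() {
	now := time.Now()
	grace := time.Hour

	fmt.Println(canEnqueue(now, now.Add(-30*time.Minute), grace)) // true: within the grace period
	fmt.Println(canEnqueue(now, now.Add(-2*time.Hour), grace))    // false: older than the grace period
}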

349 lines
10 KiB
Go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/orders"
)

// TODO remove when db is removed.
// TestOrderDBSettle verifies that an order enqueued in the orders DB is
// settled and archived when the sender is triggered.
func TestOrderDBSettle(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		satellite.Audit.Worker.Loop.Pause()
		node := planet.StorageNodes[0]
		service := node.Storage2.Orders
		service.Sender.Pause()
		service.Cleanup.Pause()

		projects, err := satellite.DB.Console().Projects().GetAll(ctx)
		require.NoError(t, err)

		bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))

		_, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits(
			ctx,
			bucketID,
			[]*overlay.SelectedNode{
				{ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)},
			},
			time.Now().Add(2*time.Hour),
			2000,
		)
		require.NoError(t, err)
		require.Len(t, orderLimits, 1)

		orderLimit := orderLimits[0].Limit
		order := &pb.Order{
			SerialNumber: orderLimit.SerialNumber,
			Amount:       1000,
		}
		signedOrder, err := signing.SignUplinkOrder(ctx, piecePrivateKey, order)
		require.NoError(t, err)
		order0 := &orders.Info{
			Limit: orderLimit,
			Order: signedOrder,
		}

		// enter orders into unsent_orders
		err = node.DB.Orders().Enqueue(ctx, order0)
		require.NoError(t, err)

		toSend, err := node.DB.Orders().ListUnsent(ctx, 10)
		require.NoError(t, err)
		require.Len(t, toSend, 1)

		// trigger order send
		service.Sender.TriggerWait()

		toSend, err = node.DB.Orders().ListUnsent(ctx, 10)
		require.NoError(t, err)
		require.Len(t, toSend, 0)

		archived, err := node.DB.Orders().ListArchived(ctx, 10)
		require.NoError(t, err)
		require.Len(t, archived, 1)
	})
}

// TestOrderFileStoreSettle verifies that an order generated by an upload and
// stored in the orders file store is settled and archived by SendOrders.
func TestOrderFileStoreSettle(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		uplinkPeer := planet.Uplinks[0]
		satellite.Audit.Worker.Loop.Pause()
		node := planet.StorageNodes[0]
		service := node.Storage2.Orders
		service.Sender.Pause()
		service.Cleanup.Pause()

		tomorrow := time.Now().Add(24 * time.Hour)

		// upload a file to generate an order on the storagenode
		testData := testrand.Bytes(8 * memory.KiB)
		err := uplinkPeer.Upload(ctx, satellite, "testbucket", "test/path", testData)
		require.NoError(t, err)

		toSend, err := node.OrdersStore.ListUnsentBySatellite(tomorrow)
		require.NoError(t, err)
		require.Len(t, toSend, 1)
		ordersForSat := toSend[satellite.ID()]
		require.Len(t, ordersForSat.InfoList, 1)

		// trigger order send
		service.SendOrders(ctx, tomorrow)

		toSend, err = node.OrdersStore.ListUnsentBySatellite(tomorrow)
		require.NoError(t, err)
		require.Len(t, toSend, 0)

		archived, err := node.OrdersStore.ListArchived()
		require.NoError(t, err)
		require.Len(t, archived, 1)
	})
}

// TODO remove when db is removed.
// TestOrderFileStoreAndDBSettle ensures that if orders exist in both DB and filestore, that the DB orders are settled first.
func TestOrderFileStoreAndDBSettle(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		uplinkPeer := planet.Uplinks[0]
		satellite.Audit.Worker.Loop.Pause()
		node := planet.StorageNodes[0]
		service := node.Storage2.Orders
		service.Sender.Pause()
		service.Cleanup.Pause()

		tomorrow := time.Now().Add(24 * time.Hour)

		// add orders to orders DB
		projects, err := satellite.DB.Console().Projects().GetAll(ctx)
		require.NoError(t, err)

		bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))

		_, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits(
			ctx,
			bucketID,
			[]*overlay.SelectedNode{
				{ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)},
			},
			time.Now().Add(2*time.Hour),
			2000,
		)
		require.NoError(t, err)
		require.Len(t, orderLimits, 1)

		orderLimit := orderLimits[0].Limit
		order := &pb.Order{
			SerialNumber: orderLimit.SerialNumber,
			Amount:       1000,
		}
		signedOrder, err := signing.SignUplinkOrder(ctx, piecePrivateKey, order)
		require.NoError(t, err)
		order0 := &orders.Info{
			Limit: orderLimit,
			Order: signedOrder,
		}

		// enter orders into unsent_orders
		err = node.DB.Orders().Enqueue(ctx, order0)
		require.NoError(t, err)

		toSendDB, err := node.DB.Orders().ListUnsent(ctx, 10)
		require.NoError(t, err)
		require.Len(t, toSendDB, 1)

		// upload a file to add orders to filestore
		testData := testrand.Bytes(8 * memory.KiB)
		err = uplinkPeer.Upload(ctx, satellite, "testbucket", "test/path", testData)
		require.NoError(t, err)

		toSendFileStore, err := node.OrdersStore.ListUnsentBySatellite(tomorrow)
		require.NoError(t, err)
		require.Len(t, toSendFileStore, 1)
		ordersForSat := toSendFileStore[satellite.ID()]
		require.Len(t, ordersForSat.InfoList, 1)

		// trigger order send
		service.SendOrders(ctx, tomorrow)

		// DB orders should be archived, but filestore orders should still be unsent.
		toSendDB, err = node.DB.Orders().ListUnsent(ctx, 10)
		require.NoError(t, err)
		require.Len(t, toSendDB, 0)

		archived, err := node.DB.Orders().ListArchived(ctx, 10)
		require.NoError(t, err)
		require.Len(t, archived, 1)

		toSendFileStore, err = node.OrdersStore.ListUnsentBySatellite(tomorrow)
		require.NoError(t, err)
		require.Len(t, toSendFileStore, 1)
		ordersForSat = toSendFileStore[satellite.ID()]
		require.Len(t, ordersForSat.InfoList, 1)

		// trigger order send again
		service.SendOrders(ctx, tomorrow)

		// now FileStore orders should be archived too.
		toSendFileStore, err = node.OrdersStore.ListUnsentBySatellite(tomorrow)
		require.NoError(t, err)
		require.Len(t, toSendFileStore, 0)

		archived, err = node.OrdersStore.ListArchived()
		require.NoError(t, err)
		require.Len(t, archived, 1)
	})
}

// TODO remove when db is removed.
// TestCleanArchiveDB verifies that CleanArchive removes only the archived
// orders in the orders DB that are older than the given cutoff time.
func TestCleanArchiveDB(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		planet.Satellites[0].Audit.Worker.Loop.Pause()
		satellite := planet.Satellites[0].ID()
		node := planet.StorageNodes[0]
		service := node.Storage2.Orders
		service.Sender.Pause()
		service.Cleanup.Pause()

		serialNumber0 := testrand.SerialNumber()
		serialNumber1 := testrand.SerialNumber()

		order0 := &orders.Info{
			Limit: &pb.OrderLimit{
				SatelliteId:  satellite,
				SerialNumber: serialNumber0,
			},
			Order: &pb.Order{},
		}
		order1 := &orders.Info{
			Limit: &pb.OrderLimit{
				SatelliteId:  satellite,
				SerialNumber: serialNumber1,
			},
			Order: &pb.Order{},
		}

		// enter orders into unsent_orders
		err := node.DB.Orders().Enqueue(ctx, order0)
		require.NoError(t, err)
		err = node.DB.Orders().Enqueue(ctx, order1)
		require.NoError(t, err)

		now := time.Now()
		yesterday := now.Add(-24 * time.Hour)

		// archive one order yesterday, one today
		err = node.DB.Orders().Archive(ctx, yesterday, orders.ArchiveRequest{
			Satellite: satellite,
			Serial:    serialNumber0,
			Status:    orders.StatusAccepted,
		})
		require.NoError(t, err)

		err = node.DB.Orders().Archive(ctx, now, orders.ArchiveRequest{
			Satellite: satellite,
			Serial:    serialNumber1,
			Status:    orders.StatusAccepted,
		})
		require.NoError(t, err)

		// trigger cleanup of archived orders older than 12 hours
		require.NoError(t, service.CleanArchive(ctx, now.Add(-12*time.Hour)))

		archived, err := node.DB.Orders().ListArchived(ctx, 10)
		require.NoError(t, err)
		require.Len(t, archived, 1)
		require.Equal(t, archived[0].Limit.SerialNumber, serialNumber1)
	})
}

// TestCleanArchiveFileStore verifies that CleanArchive removes only the
// archived orders in the file store that are older than the given cutoff time.
func TestCleanArchiveFileStore(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			StorageNode: func(_ int, config *storagenode.Config) {
				// A large grace period so we can write to multiple buckets at once
				config.Storage2.OrderLimitGracePeriod = 48 * time.Hour
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		planet.Satellites[0].Audit.Worker.Loop.Pause()
		satellite := planet.Satellites[0].ID()
		node := planet.StorageNodes[0]
		service := node.Storage2.Orders
		service.Sender.Pause()
		service.Cleanup.Pause()

		now := time.Now()
		yesterday := now.Add(-24 * time.Hour)

		serialNumber0 := testrand.SerialNumber()
		createdAt0 := now
		serialNumber1 := testrand.SerialNumber()
		createdAt1 := now.Add(-24 * time.Hour)

		order0 := &orders.Info{
			Limit: &pb.OrderLimit{
				SatelliteId:   satellite,
				SerialNumber:  serialNumber0,
				OrderCreation: createdAt0,
			},
			Order: &pb.Order{},
		}
		order1 := &orders.Info{
			Limit: &pb.OrderLimit{
				SatelliteId:   satellite,
				SerialNumber:  serialNumber1,
				OrderCreation: createdAt1,
			},
			Order: &pb.Order{},
		}

		// enqueue both orders; they will be placed in separate buckets because they have different creation hours
		err := node.OrdersStore.Enqueue(order0)
		require.NoError(t, err)
		err = node.OrdersStore.Enqueue(order1)
		require.NoError(t, err)

		// archive one order yesterday, one today
		err = node.OrdersStore.Archive(satellite, createdAt0.Truncate(time.Hour), yesterday, pb.SettlementWithWindowResponse_ACCEPTED)
		require.NoError(t, err)

		err = node.OrdersStore.Archive(satellite, createdAt1.Truncate(time.Hour), now, pb.SettlementWithWindowResponse_ACCEPTED)
		require.NoError(t, err)

		archived, err := node.OrdersStore.ListArchived()
		require.NoError(t, err)
		require.Len(t, archived, 2)

		// trigger cleanup of archived orders older than 12 hours
		require.NoError(t, service.CleanArchive(ctx, now.Add(-12*time.Hour)))

		archived, err = node.OrdersStore.ListArchived()
		require.NoError(t, err)
		require.Len(t, archived, 1)
		require.Equal(t, archived[0].Limit.SerialNumber, serialNumber1)
	})
}