// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/require"

	"storj.io/common/identity/testidentity"
	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/orders"
	"storj.io/storj/storagenode/orders/ordersfile"
	"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)
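
// TestDB exercises the storage node orders database: enqueueing unsent
// orders, listing them, archiving them as accepted or rejected, and
// cleaning old entries out of the archive.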
func TestDB(t *testing.T) {
	storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
		ordersdb := db.Orders()

		storagenode := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
		satellite0 := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())

		piece := storj.NewPieceID()

		// basic test
		emptyUnsent, err := ordersdb.ListUnsent(ctx, 100)
		require.NoError(t, err)
		require.Len(t, emptyUnsent, 0)

		emptyArchive, err := ordersdb.ListArchived(ctx, 100)
		require.NoError(t, err)
		require.Len(t, emptyArchive, 0)

		now := time.Now()
		before := now.Add(-time.Second)

		piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
		require.NoError(t, err)

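		// Build two order limits signed by the satellite, each paired with an
		// order signed with the uplink piece key.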
		infos := make([]*ordersfile.Info, 2)
		for i := 0; i < len(infos); i++ {
			serialNumber := testrand.SerialNumber()

			limit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite0), &pb.OrderLimit{
				SerialNumber:    serialNumber,
				SatelliteId:     satellite0.ID,
				UplinkPublicKey: piecePublicKey,
				StorageNodeId:   storagenode.ID,
				PieceId:         piece,
				Limit:           100,
				Action:          pb.PieceAction_GET,
				OrderCreation:   before.AddDate(0, 0, -1),
				PieceExpiration: before,
				OrderExpiration: before,
			})
			require.NoError(t, err)

			order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
				SerialNumber: serialNumber,
				Amount:       50,
			})
			require.NoError(t, err)

			infos[i] = &ordersfile.Info{
				Limit: limit,
				Order: order,
			}
		}

		// basic add
		err = ordersdb.Enqueue(ctx, infos[0])
		require.NoError(t, err)

		// duplicate add
		err = ordersdb.Enqueue(ctx, infos[0])
		require.Error(t, err, "duplicate add")

		unsent, err := ordersdb.ListUnsent(ctx, 100)
		require.NoError(t, err)
		require.Empty(t, cmp.Diff([]*ordersfile.Info{infos[0]}, unsent, cmp.Comparer(pb.Equal)))

		// another add
		err = ordersdb.Enqueue(ctx, infos[1])
		require.NoError(t, err)

		unsent, err = ordersdb.ListUnsent(ctx, 100)
		require.NoError(t, err)
		require.Empty(t,
			cmp.Diff([]*ordersfile.Info{infos[0], infos[1]}, unsent, cmp.Comparer(pb.Equal)),
		)

		// list by group
		unsentGrouped, err := ordersdb.ListUnsentBySatellite(ctx)
		require.NoError(t, err)

		expectedGrouped := map[storj.NodeID][]*ordersfile.Info{}
		require.Empty(t, cmp.Diff(expectedGrouped, unsentGrouped, cmp.Comparer(pb.Equal)))

		// test archival
		archivedAt := time.Now()
		err = ordersdb.Archive(ctx, archivedAt, orders.ArchiveRequest{
			Satellite: satellite0.ID,
			Serial:    infos[0].Limit.SerialNumber,
			Status:    orders.StatusAccepted,
		})
		require.NoError(t, err)

		// duplicate archive
		err = ordersdb.Archive(ctx, archivedAt, orders.ArchiveRequest{
			Satellite: satellite0.ID,
			Serial:    infos[0].Limit.SerialNumber,
			Status:    orders.StatusRejected,
		})
		require.Error(t, err)
		require.True(t,
			orders.OrderNotFoundError.Has(err),
			"expected orders.OrderNotFoundError class",
		)

		// one new archive and one duplicate
		err = ordersdb.Archive(ctx, archivedAt, orders.ArchiveRequest{
			Satellite: satellite0.ID,
			Serial:    infos[0].Limit.SerialNumber,
			Status:    orders.StatusRejected,
		}, orders.ArchiveRequest{
			Satellite: satellite0.ID,
			Serial:    infos[1].Limit.SerialNumber,
			Status:    orders.StatusRejected,
		})
		require.Error(t, err)
		require.True(t,
			orders.OrderNotFoundError.Has(err),
			"expected orders.OrderNotFoundError class",
		)

		// shouldn't be in unsent list
		unsent, err = ordersdb.ListUnsent(ctx, 100)
		require.NoError(t, err)
		require.Len(t, unsent, 0)

		// it should now be in the archive
		archived, err := ordersdb.ListArchived(ctx, 100)
		require.NoError(t, err)
		require.Len(t, archived, 2)
		require.Empty(t, cmp.Diff([]*orders.ArchivedInfo{
			{
				Limit:      infos[0].Limit,
				Order:      infos[0].Order,
				Status:     orders.StatusAccepted,
				ArchivedAt: archived[0].ArchivedAt,
			},
			{
				Limit:      infos[1].Limit,
				Order:      infos[1].Order,
				Status:     orders.StatusRejected,
				ArchivedAt: archived[1].ArchivedAt,
			},
		}, archived, cmp.Comparer(pb.Equal)))

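		// Let a moment of wall-clock time pass before exercising CleanArchive.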
		time.Sleep(time.Second)

		// archived orders should not be deleted because they are not 1 hour old yet
		n, err := db.Orders().CleanArchive(ctx, now.Add(-time.Hour))
		require.NoError(t, err)
		require.Equal(t, 0, n)

		// archived orders should be deleted because they were archived before
		// the cutoff of archivedAt plus one second
		n, err = db.Orders().CleanArchive(ctx, archivedAt.Add(time.Second))
		require.NoError(t, err)
		require.Equal(t, 2, n)
	})
}
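
// TestDB_Trivial checks that each orders DB method can be called on a fresh
// database without returning an error.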
func TestDB_Trivial(t *testing.T) {
	storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
		satelliteID, serial := testrand.NodeID(), testrand.SerialNumber()

		now := time.Now()
		before := now.Add(-time.Second)

		{ // Ensure Enqueue works at all
			err := db.Orders().Enqueue(ctx, &ordersfile.Info{
				Order: &pb.Order{},
				Limit: &pb.OrderLimit{
					SatelliteId:     satelliteID,
					SerialNumber:    serial,
					OrderExpiration: before,
				},
			})
			require.NoError(t, err)
		}

		{ // Ensure ListUnsent works at all
			infos, err := db.Orders().ListUnsent(ctx, 1)
			require.NoError(t, err)
			require.Len(t, infos, 1)
		}

		{ // Ensure ListUnsentBySatellite works at all
			infos, err := db.Orders().ListUnsentBySatellite(ctx)
			require.NoError(t, err)
			require.Len(t, infos, 0)
		}

		{ // Ensure Archive works at all
			err := db.Orders().Archive(ctx, before, orders.ArchiveRequest{
				Satellite: satelliteID,
				Serial:    serial,
				Status:    orders.StatusAccepted,
			})
			require.NoError(t, err)
		}

		{ // Ensure ListArchived works at all
			infos, err := db.Orders().ListArchived(ctx, 1)
			require.NoError(t, err)
			require.Len(t, infos, 1)
		}

		{ // Ensure CleanArchive works at all
			n, err := db.Orders().CleanArchive(ctx, now)
			require.NoError(t, err)
			require.Equal(t, 1, n)
		}
	})
}