// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/storagenode/orders"
	"storj.io/storj/storagenode/orders/ordersfile"
)

func TestOrdersStore_Enqueue_GracePeriodFailure(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()

	// make order limit grace period 24 hours
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, 24*time.Hour)
	require.NoError(t, err)

	// adding an order older than the grace period should result in an error
	newSN := testrand.SerialNumber()
	newInfo := &ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:    newSN,
			SatelliteId:     testrand.NodeID(),
			Action:          pb.PieceAction_GET,
			OrderCreation:   now.Add(-48 * time.Hour),
			OrderExpiration: now.Add(time.Hour),
		},
		Order: &pb.Order{
			SerialNumber: newSN,
			Amount:       10,
		},
	}
	err = ordersStore.Enqueue(newInfo)
	require.Error(t, err)
}
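
// TestOrdersStore_Enqueue_GracePeriodSuccess is an illustrative companion sketch to
// the failure case above (not part of the original suite): with the same 24-hour
// grace period, an order created inside the window should enqueue cleanly. It uses
// only APIs already exercised elsewhere in this file.
func TestOrdersStore_Enqueue_GracePeriodSuccess(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()

	// make order limit grace period 24 hours
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, 24*time.Hour)
	require.NoError(t, err)

	// an order created one hour ago is well within the 24-hour grace period,
	// so Enqueue should accept it
	sn := testrand.SerialNumber()
	require.NoError(t, ordersStore.Enqueue(&ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:    sn,
			SatelliteId:     testrand.NodeID(),
			Action:          pb.PieceAction_GET,
			OrderCreation:   now.Add(-time.Hour),
			OrderExpiration: now.Add(time.Hour),
		},
		Order: &pb.Order{
			SerialNumber: sn,
			Amount:       10,
		},
	}))
}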

func TestOrdersStore_ListUnsentBySatellite(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()

	// make order limit grace period 12 hours
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, 12*time.Hour)
	require.NoError(t, err)

	// for each satellite, make three orders from four hours ago, three from two hours ago, and three from now.
	numSatellites := 3
	createdTimes := []time.Time{
		now.Add(-4 * time.Hour),
		now.Add(-2 * time.Hour),
		now,
	}
	serialsPerSatPerTime := 3

	originalInfos, err := storeNewOrders(ordersStore, numSatellites, serialsPerSatPerTime, createdTimes)
	require.NoError(t, err)

	// 3 times:
	// list unsent orders - should receive data from all satellites the first two times, and nothing the last time.
	// archive unsent orders
	expectedArchivedInfos := make(map[storj.SerialNumber]*orders.ArchivedInfo)

	archiveTime1 := now.Add(-2 * time.Hour)
	archiveTime2 := now
	status1 := pb.SettlementWithWindowResponse_ACCEPTED
	status2 := pb.SettlementWithWindowResponse_REJECTED
	for i := 0; i < 3; i++ {
		// This should return all the orders created more than 1 hour before "now".
		unsentMap, err := ordersStore.ListUnsentBySatellite(ctx, now.Add(12*time.Hour))
		require.NoError(t, err)

		// on last iteration, expect nothing returned
		if i == 2 {
			require.Len(t, unsentMap, 0)
			break
		}

		// go through order limits and make sure information is accurate
		require.Len(t, unsentMap, numSatellites)
		for satelliteID, unsentSatList := range unsentMap {
			require.Len(t, unsentSatList.InfoList, serialsPerSatPerTime)

			for _, unsentInfo := range unsentSatList.InfoList {
				// "new" orders should not be returned
				require.True(t, unsentInfo.Limit.OrderCreation.Before(createdTimes[2]))
				sn := unsentInfo.Limit.SerialNumber
				originalInfo := originalInfos[sn]

				verifyInfosEqual(t, unsentInfo, originalInfo)
				// expect that creation hour is consistent with order
				require.True(t, unsentSatList.CreatedAtHour.Equal(unsentInfo.Limit.OrderCreation.Truncate(time.Hour)))

				// add to archive batch
				archivedAt := archiveTime1
				orderStatus := orders.StatusAccepted
				if i == 1 {
					archivedAt = archiveTime2
					orderStatus = orders.StatusRejected
				}
				newArchivedInfo := &orders.ArchivedInfo{
					Limit:      unsentInfo.Limit,
					Order:      unsentInfo.Order,
					Status:     orderStatus,
					ArchivedAt: archivedAt,
				}
				expectedArchivedInfos[unsentInfo.Limit.SerialNumber] = newArchivedInfo
			}

			// archive unsent file
			archivedAt := archiveTime1
			status := status1
			if i == 1 {
				archivedAt = archiveTime2
				status = status2
			}
			err = ordersStore.Archive(satelliteID, unsentSatList, archivedAt, status)
			require.NoError(t, err)
		}
	}

	// list archived, expect everything from first two created at time buckets
	archived, err := ordersStore.ListArchived()
	require.NoError(t, err)
	require.Len(t, archived, numSatellites*serialsPerSatPerTime*2)
	for _, archivedInfo := range archived {
		sn := archivedInfo.Limit.SerialNumber
		expectedInfo := expectedArchivedInfos[sn]
		verifyArchivedInfosEqual(t, expectedInfo, archivedInfo)

		// one of the batches should be "accepted" and the other should be "rejected"
		if archivedInfo.ArchivedAt.Round(0).Equal(archiveTime2.Round(0)) {
			require.Equal(t, archivedInfo.Status, orders.StatusRejected)
		} else {
			require.Equal(t, archivedInfo.Status, orders.StatusAccepted)
		}
	}

	// clean archive for anything older than 30 minutes
	err = ordersStore.CleanArchive(now.Add(-30 * time.Minute))
	require.NoError(t, err)

	// list archived, expect only recent archived batch (other was cleaned)
	archived, err = ordersStore.ListArchived()
	require.NoError(t, err)
	require.Len(t, archived, numSatellites*serialsPerSatPerTime)
	for _, archivedInfo := range archived {
		sn := archivedInfo.Limit.SerialNumber
		expectedInfo := expectedArchivedInfos[sn]
		verifyArchivedInfosEqual(t, expectedInfo, archivedInfo)
		require.Equal(t, archivedInfo.ArchivedAt.Round(0), archiveTime2.Round(0))
		require.Equal(t, archivedInfo.Status, orders.StatusRejected)
	}

	// clean archive for everything before now, expect list to return nothing
	err = ordersStore.CleanArchive(now.Add(time.Nanosecond))
	require.NoError(t, err)
	archived, err = ordersStore.ListArchived()
	require.NoError(t, err)
	require.Len(t, archived, 0)
}
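
// TestHourWindowTruncation is a minimal illustrative sketch (not part of the
// original suite) of the bucketing rule asserted above via CreatedAtHour: an order
// belongs to the window for the hour its limit was created in, i.e. OrderCreation
// truncated to the hour, so two creation times in the same clock hour share a window.
func TestHourWindowTruncation(t *testing.T) {
	base := time.Date(2020, 10, 15, 19, 57, 2, 0, time.UTC)
	sameWindow := base.Add(2 * time.Minute)  // 19:59, same hour window
	nextWindow := base.Add(10 * time.Minute) // 20:07, next hour window

	require.Equal(t, base.Truncate(time.Hour), sameWindow.Truncate(time.Hour))
	require.NotEqual(t, base.Truncate(time.Hour), nextWindow.Truncate(time.Hour))
}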

func TestOrdersStore_ListUnsentBySatellite_Ongoing(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()
	satellite := testrand.NodeID()
	tomorrow := now.Add(24 * time.Hour)

	// make order limit grace period 1 hour
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, time.Hour)
	require.NoError(t, err)

	// empty store means no orders can be listed
	unsent, err := ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 0)

	// store an order that can be listed
	sn := testrand.SerialNumber()
	require.NoError(t, ordersStore.Enqueue(&ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:  sn,
			SatelliteId:   satellite,
			Action:        pb.PieceAction_GET,
			OrderCreation: now,
		},
		Order: &pb.Order{
			SerialNumber: sn,
			Amount:       1,
		},
	}))

	// check that we can list it tomorrow
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)

	// begin an enqueue in the bucket
	commit, err := ordersStore.BeginEnqueue(satellite, now)
	require.NoError(t, err)

	// we should no longer be able to list that window
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 0)

	// commit the order
	sn = testrand.SerialNumber()
	require.NoError(t, commit(&ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:  sn,
			SatelliteId:   satellite,
			Action:        pb.PieceAction_GET,
			OrderCreation: now,
		},
		Order: &pb.Order{
			SerialNumber: sn,
			Amount:       1,
		},
	}))

	// check that we can list it tomorrow
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
}
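
// handleTransferSketch is a hedged sketch (not part of the original file) of how the
// begin/commit pattern tested above is meant to be used by an endpoint: BeginEnqueue
// marks the window as having an in-flight request so it cannot be listed and settled
// mid-transfer, and the returned commit callback persists the order once the
// transfer completes.
func handleTransferSketch(store *orders.FileStore, satellite storj.NodeID, info *ordersfile.Info, now time.Time) error {
	commit, err := store.BeginEnqueue(satellite, now)
	if err != nil {
		return err
	}
	// ... serve the upload or download here ...
	return commit(info)
}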

func TestOrdersDB_ListUnsentBySatellite_Expired(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellitePeer := planet.Satellites[0]
		storagenodePeer := planet.StorageNodes[0]
		storageNodeOrdersDB := storagenodePeer.DB.Orders()
		now := time.Now().UTC()
		testSerialNumber := testrand.SerialNumber()
		// Setup: add one order that is not expired
		require.NoError(t, storageNodeOrdersDB.Enqueue(ctx, &ordersfile.Info{
			Limit: &pb.OrderLimit{
				SerialNumber:    testSerialNumber,
				SatelliteId:     satellitePeer.ID(),
				Action:          pb.PieceAction_GET,
				OrderCreation:   now,
				OrderExpiration: now.Add(5 * time.Hour),
			},
			Order: &pb.Order{
				SerialNumber: testSerialNumber,
				Amount:       100,
			},
		}))
		testSerialNumber2 := testrand.SerialNumber()
		// Setup: add one order that IS expired
		require.NoError(t, storageNodeOrdersDB.Enqueue(ctx, &ordersfile.Info{
			Limit: &pb.OrderLimit{
				SerialNumber:    testSerialNumber2,
				SatelliteId:     satellitePeer.ID(),
				Action:          pb.PieceAction_GET,
				OrderExpiration: now.Add(-5 * time.Hour),
			},
			Order: &pb.Order{
				SerialNumber: testSerialNumber2,
				Amount:       20,
			},
		}))

		// Confirm that expired orders are not returned when listing unsent orders
		unsentOrdersBySA, err := storageNodeOrdersDB.ListUnsentBySatellite(ctx)
		require.NoError(t, err)
		require.Equal(t, len(unsentOrdersBySA), 1)
		// there should only be 1 unsent order, since the other order is expired
		require.Equal(t, len(unsentOrdersBySA[satellitePeer.ID()]), 1)
		// the unsent order should be the unexpired order
		require.Equal(t, unsentOrdersBySA[satellitePeer.ID()][0].Limit.SerialNumber, testSerialNumber)
	})
}

func TestOrdersStore_CorruptUnsentV0(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()
	satellite := testrand.NodeID()
	tomorrow := now.Add(24 * time.Hour)

	// make order limit grace period 1 hour
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, time.Hour)
	require.NoError(t, err)

	// empty store means no orders can be listed
	unsent, err := ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 0)

	sn := testrand.SerialNumber()
	info := &ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:  sn,
			SatelliteId:   satellite,
			Action:        pb.PieceAction_GET,
			OrderCreation: now,
		},
		Order: &pb.Order{
			SerialNumber: sn,
			Amount:       1,
		},
	}
	// store two orders for the same window using deprecated V0
	unsentFileName := ordersfile.UnsentFileName(satellite, now, ordersfile.V0)
	unsentDir := filepath.Join(dirName, "unsent")
	unsentFilePath := filepath.Join(unsentDir, unsentFileName)
	of, err := ordersfile.OpenWritableV0(unsentFilePath)
	require.NoError(t, err)
	require.NoError(t, of.Append(info))
	require.NoError(t, of.Append(info))
	require.NoError(t, of.Close())

	// check that we can see both orders tomorrow
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 2)

	// corrupt unsent orders file by removing the last byte
	err = filepath.Walk(unsentDir, func(path string, info os.FileInfo, err error) error {
		require.NoError(t, err)
		if info.IsDir() {
			return nil
		}
		err = os.Truncate(path, info.Size()-1)
		return err
	})
	require.NoError(t, err)

	// add another order, which we shouldn't see for V0 since it is after the corrupted one
	of, err = ordersfile.OpenWritableV0(unsentFilePath)
	require.NoError(t, err)
	require.NoError(t, of.Append(info))
	require.NoError(t, of.Close())

	// only the second order should be corrupted, so we should still see one order
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 1)
}

func TestOrdersStore_CorruptUnsentV1(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()
	satellite := testrand.NodeID()
	tomorrow := now.Add(24 * time.Hour)

	// make order limit grace period 1 hour
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, time.Hour)
	require.NoError(t, err)

	// empty store means no orders can be listed
	unsent, err := ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 0)

	sn1 := testrand.SerialNumber()
	sn2 := testrand.SerialNumber()
	sn3 := testrand.SerialNumber()
	info := &ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:  sn1,
			SatelliteId:   satellite,
			Action:        pb.PieceAction_GET,
			OrderCreation: now,
		},
		Order: &pb.Order{
			SerialNumber: sn1,
			Amount:       1,
		},
	}
	// store sn1 and sn2 in the same window
	require.NoError(t, ordersStore.Enqueue(info))
	info.Limit.SerialNumber = sn2
	info.Order.SerialNumber = sn2
	require.NoError(t, ordersStore.Enqueue(info))

	// check that we can see both orders tomorrow
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 2)

	// corrupt unsent orders file by removing the last byte
	err = filepath.Walk(filepath.Join(dirName, "unsent"), func(path string, info os.FileInfo, err error) error {
		require.NoError(t, err)
		if info.IsDir() {
			return nil
		}
		err = os.Truncate(path, info.Size()-1)
		return err
	})
	require.NoError(t, err)

	// only the second order should be corrupted, so we should still see one order (sn1)
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 1)
	require.EqualValues(t, sn1, unsent[satellite].InfoList[0].Order.SerialNumber)

	// add another order, sn3, to the same window
	info.Limit.SerialNumber = sn3
	info.Order.SerialNumber = sn3
	require.NoError(t, ordersStore.Enqueue(info))

	// only the second order should be corrupted, so we should still see the first and last orders (sn1, sn3)
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 2)
	require.Equal(t, ordersfile.V1, unsent[satellite].Version)
	require.EqualValues(t, sn1, unsent[satellite].InfoList[0].Order.SerialNumber)
	require.EqualValues(t, sn3, unsent[satellite].InfoList[1].Order.SerialNumber)
}
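
// truncateLastByte is a hedged refactoring sketch (not part of the original file) of
// the corruption step duplicated in the two tests above: it chops the final byte off
// every file under dir, simulating a torn last write. The tests keep their inline
// copies; this only shows how the shared step could be factored out.
func truncateLastByte(t *testing.T, dir string) {
	t.Helper()
	require.NoError(t, filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		require.NoError(t, err)
		if info.IsDir() {
			return nil
		}
		return os.Truncate(path, info.Size()-1)
	}))
}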

func TestOrdersStore_V0ToV1(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	dirName := ctx.Dir("test-orders")
	now := time.Now()
	satellite := testrand.NodeID()
	tomorrow := now.Add(24 * time.Hour)

	// make order limit grace period 1 hour
	ordersStore, err := orders.NewFileStore(zaptest.NewLogger(t), dirName, time.Hour)
	require.NoError(t, err)

	// empty store means no orders can be listed
	unsent, err := ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 0)

	sn1 := testrand.SerialNumber()
	sn2 := testrand.SerialNumber()
	info := &ordersfile.Info{
		Limit: &pb.OrderLimit{
			SerialNumber:  sn1,
			SatelliteId:   satellite,
			Action:        pb.PieceAction_GET,
			OrderCreation: now,
		},
		Order: &pb.Order{
			SerialNumber: sn1,
			Amount:       1,
		},
	}
	// store sn1 and sn2 in the same window
	// sn1 is stored with deprecated V0, so sn2 should also be stored with V0 even when Enqueue() is used
	unsentFileName := ordersfile.UnsentFileName(satellite, now, ordersfile.V0)
	unsentDir := filepath.Join(dirName, "unsent")
	unsentFilePath := filepath.Join(unsentDir, unsentFileName)
	of, err := ordersfile.OpenWritableV0(unsentFilePath)
	require.NoError(t, err)

	require.NoError(t, of.Append(info))
	info.Limit.SerialNumber = sn2
	info.Order.SerialNumber = sn2
	require.NoError(t, of.Append(info))
	require.NoError(t, of.Close())

	// check that we can see both orders tomorrow
	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 2)
	require.Equal(t, ordersfile.V0, unsent[satellite].Version)

	// archive file to free up window
	require.NoError(t, ordersStore.Archive(satellite, unsent[satellite], time.Now(), pb.SettlementWithWindowResponse_ACCEPTED))
	// new file should be created with version V1
	require.NoError(t, ordersStore.Enqueue(info))

	unsent, err = ordersStore.ListUnsentBySatellite(ctx, tomorrow)
	require.NoError(t, err)
	require.Len(t, unsent, 1)
	require.Len(t, unsent[satellite].InfoList, 1)
	require.Equal(t, ordersfile.V1, unsent[satellite].Version)
}

func verifyInfosEqual(t *testing.T, a, b *ordersfile.Info) {
	t.Helper()

	require.NotNil(t, a)
	require.NotNil(t, b)

	require.Equal(t, a.Limit.SerialNumber, b.Limit.SerialNumber)
	require.Equal(t, a.Limit.SatelliteId, b.Limit.SatelliteId)
	require.Equal(t, a.Limit.OrderExpiration.UTC(), b.Limit.OrderExpiration.UTC())
	require.Equal(t, a.Limit.Action, b.Limit.Action)

	require.Equal(t, a.Order.Amount, b.Order.Amount)
	require.Equal(t, a.Order.SerialNumber, b.Order.SerialNumber)
}

func verifyArchivedInfosEqual(t *testing.T, a, b *orders.ArchivedInfo) {
	t.Helper()

	require.NotNil(t, a)
	require.NotNil(t, b)

	require.Equal(t, a.Limit.SerialNumber, b.Limit.SerialNumber)
	require.Equal(t, a.Limit.SatelliteId, b.Limit.SatelliteId)
	require.Equal(t, a.Limit.OrderExpiration.UTC(), b.Limit.OrderExpiration.UTC())
	require.Equal(t, a.Limit.Action, b.Limit.Action)

	require.Equal(t, a.Order.Amount, b.Order.Amount)
	require.Equal(t, a.Order.SerialNumber, b.Order.SerialNumber)

	require.Equal(t, a.Status, b.Status)
	require.Equal(t, a.ArchivedAt.UTC(), b.ArchivedAt.UTC())
}

func storeNewOrders(ordersStore *orders.FileStore, numSatellites, numOrdersPerSatPerTime int, createdAtTimes []time.Time) (map[storj.SerialNumber]*ordersfile.Info, error) {
	actions := []pb.PieceAction{
		pb.PieceAction_GET,
		pb.PieceAction_PUT_REPAIR,
		pb.PieceAction_GET_AUDIT,
	}
	originalInfos := make(map[storj.SerialNumber]*ordersfile.Info)
	for i := 0; i < numSatellites; i++ {
		satellite := testrand.NodeID()

		for _, createdAt := range createdAtTimes {
			for j := 0; j < numOrdersPerSatPerTime; j++ {
				expiration := time.Now().Add(time.Hour)
				amount := testrand.Int63n(1000)
				sn := testrand.SerialNumber()
				action := actions[j%len(actions)]

				newInfo := &ordersfile.Info{
					Limit: &pb.OrderLimit{
						SerialNumber:    sn,
						SatelliteId:     satellite,
						Action:          action,
						OrderCreation:   createdAt,
						OrderExpiration: expiration,
					},
					Order: &pb.Order{
						SerialNumber: sn,
						Amount:       amount,
					},
				}
				originalInfos[sn] = newInfo

				// store the new info in the orders store
				err := ordersStore.Enqueue(newInfo)
				if err != nil {
					return originalInfos, err
				}
			}
		}
	}
	return originalInfos, nil
}