// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metabase"
)

func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}
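		// the satellite's default encryption key seals the bucket location into
		// each order limit's metadata; at settlement time the satellite decrypts
		// it to know which project and bucket to charge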
		key := satellite.Config.Orders.EncryptionKeys.Default

		// stop the async flush because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.Equal(t, int64(0), snbw)
		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.Equal(t, int64(0), bucketbw)

		testCases := []struct {
			name          string
			dataAmount    int64
			orderCreation time.Time
			settledAmt    int64
		}{
			{"settle 2 orders, valid", int64(50), now, int64(100)},
			{"settle 2 orders, window mismatch", int64(50), now.Add(-48 * time.Hour), int64(50)},
		}
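		// in the valid case both orders share one settlement window, so 50+50
		// settles; in the mismatch case the orders were created 48 hours apart,
		// so they fall in different windows and only one order's 50 is credited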

		for _, tt := range testCases {
			func() {
				// create serial number to use in test. must be unique for each run.
				serialNumber1 := testrand.SerialNumber()
				encrypted1, err := key.EncryptMetadata(
					serialNumber1,
					&internalpb.OrderLimitMetadata{
						CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
					},
				)
				require.NoError(t, err)

				serialNumber2 := testrand.SerialNumber()
				encrypted2, err := key.EncryptMetadata(
					serialNumber2,
					&internalpb.OrderLimitMetadata{
						CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
					},
				)
				require.NoError(t, err)
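				// the two orders get distinct serial numbers but carry the same
				// encrypted bucket prefix, so they settle against one bucket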

				piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
				require.NoError(t, err)
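				// piecePrivateKey will sign the uplink's orders; the public half
				// rides in the order limit so the satellite can verify those signatures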

				// create signed orderlimit and order to test with
				limit1 := &pb.OrderLimit{
					SerialNumber:           serialNumber1,
					SatelliteId:            satellite.ID(),
					UplinkPublicKey:        piecePublicKey,
					StorageNodeId:          storagenode.ID(),
					PieceId:                storj.NewPieceID(),
					Action:                 pb.PieceAction_PUT,
					Limit:                  1000,
					PieceExpiration:        time.Time{},
					OrderCreation:          tt.orderCreation,
					OrderExpiration:        now.Add(24 * time.Hour),
					EncryptedMetadataKeyId: key.ID[:],
					EncryptedMetadata:      encrypted1,
				}
				orderLimit1, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit1)
				require.NoError(t, err)

				order1, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
					SerialNumber: serialNumber1,
					Amount:       tt.dataAmount,
				})
				require.NoError(t, err)

				limit2 := &pb.OrderLimit{
					SerialNumber:           serialNumber2,
					SatelliteId:            satellite.ID(),
					UplinkPublicKey:        piecePublicKey,
					StorageNodeId:          storagenode.ID(),
					PieceId:                storj.NewPieceID(),
					Action:                 pb.PieceAction_PUT,
					Limit:                  1000,
					PieceExpiration:        time.Time{},
					OrderCreation:          now,
					OrderExpiration:        now.Add(24 * time.Hour),
					EncryptedMetadataKeyId: key.ID[:],
					EncryptedMetadata:      encrypted2,
				}
				orderLimit2, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit2)
				require.NoError(t, err)

				order2, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
					SerialNumber: serialNumber2,
					Amount:       tt.dataAmount,
				})
				require.NoError(t, err)

				// create connection between storagenode and satellite
				conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
				require.NoError(t, err)
				defer ctx.Check(conn.Close)

				stream, err := pb.NewDRPCOrdersClient(conn).SettlementWithWindow(ctx)
				require.NoError(t, err)
				defer ctx.Check(stream.Close)
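
				// both orders are sent on one stream; the endpoint accumulates
				// amounts per action and commits them as a single window on close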
				// storagenode settles an order and orderlimit
				err = stream.Send(&pb.SettlementRequest{
					Limit: orderLimit1,
					Order: order1,
				})
				require.NoError(t, err)
				err = stream.Send(&pb.SettlementRequest{
					Limit: orderLimit2,
					Order: order2,
				})
				require.NoError(t, err)
				resp, err := stream.CloseAndRecv()
				require.NoError(t, err)

				settled := map[int32]int64{int32(pb.PieceAction_PUT): tt.settledAmt}
				require.Equal(t, &pb.SettlementWithWindowResponse{
					Status:        pb.SettlementWithWindowResponse_ACCEPTED,
					ActionSettled: settled,
				}, resp)
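
				// flush the chores so the settled amounts reach the bandwidth tables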
				satellite.Orders.Chore.Loop.TriggerWait()

				// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
				snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, tt.orderCreation)
				require.NoError(t, err)
				require.EqualValues(t, tt.settledAmt, snbw)

				newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, tt.orderCreation)
				require.NoError(t, err)
				require.EqualValues(t, tt.settledAmt, newBbw)
			}()
		}
	})
}

func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		const dataAmount int64 = 50
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}
		key := satellite.Config.Orders.EncryptionKeys.Default

		// stop the async flush because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, snbw)

		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, bucketbw)

		// create serial number to use in test
		serialNumber := testrand.SerialNumber()
		encrypted, err := key.EncryptMetadata(
			serialNumber,
			&internalpb.OrderLimitMetadata{
				CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
			},
		)
		require.NoError(t, err)

		piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
		require.NoError(t, err)

		testCases := []struct {
			name           string
			dataAmount     int64
			expectedStatus pb.SettlementWithWindowResponse_Status
		}{
			{"first settlement", dataAmount, pb.SettlementWithWindowResponse_ACCEPTED},
			{"settle the same a second time, matches first", dataAmount, pb.SettlementWithWindowResponse_ACCEPTED},
			{"settle a third time, doesn't match first", dataAmount + 1, pb.SettlementWithWindowResponse_REJECTED},
		}
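		// settling the same serial twice with the same amount is an idempotent
		// retry; settling it with a different amount must be rejected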

		for _, tt := range testCases {
			func() {
				// create signed orderlimit and order to test with
				limit := &pb.OrderLimit{
					SerialNumber:           serialNumber,
					SatelliteId:            satellite.ID(),
					UplinkPublicKey:        piecePublicKey,
					StorageNodeId:          storagenode.ID(),
					PieceId:                storj.NewPieceID(),
					Action:                 pb.PieceAction_PUT,
					Limit:                  1000,
					PieceExpiration:        time.Time{},
					OrderCreation:          now,
					OrderExpiration:        now.Add(24 * time.Hour),
					EncryptedMetadataKeyId: key.ID[:],
					EncryptedMetadata:      encrypted,
				}
				orderLimit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
				require.NoError(t, err)

				order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
					SerialNumber: serialNumber,
					Amount:       tt.dataAmount,
				})
				require.NoError(t, err)

				// create connection between storagenode and satellite
				conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
				require.NoError(t, err)
				defer ctx.Check(conn.Close)

				stream, err := pb.NewDRPCOrdersClient(conn).SettlementWithWindow(ctx)
				require.NoError(t, err)
				defer ctx.Check(stream.Close)

				// storagenode settles an order and orderlimit
				err = stream.Send(&pb.SettlementRequest{
					Limit: orderLimit,
					Order: order,
				})
				require.NoError(t, err)
				resp, err := stream.CloseAndRecv()
				require.NoError(t, err)

				expected := new(pb.SettlementWithWindowResponse)
				switch {
				case tt.expectedStatus == pb.SettlementWithWindowResponse_ACCEPTED:
					expected.Status = pb.SettlementWithWindowResponse_ACCEPTED
					expected.ActionSettled = map[int32]int64{int32(pb.PieceAction_PUT): tt.dataAmount}
				default:
					expected.Status = pb.SettlementWithWindowResponse_REJECTED
					expected.ActionSettled = nil
				}
				require.Equal(t, expected, resp)

				// flush the chores
				satellite.Orders.Chore.Loop.TriggerWait()
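
				// totals must still equal dataAmount: accepted retries are
				// idempotent and the rejected attempt added nothing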
				// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
				snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
				require.NoError(t, err)
				require.Equal(t, dataAmount, snbw)

				newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
				require.NoError(t, err)
				require.Equal(t, dataAmount, newBbw)
			}()
		}
	})
}

func TestSettlementWithWindowEndpointErrors(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}

		// stop the async flush because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, snbw)

		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, bucketbw)

		piecePublicKey1, piecePrivateKey1, err := storj.NewPieceKey()
		require.NoError(t, err)

		_, piecePrivateKey2, err := storj.NewPieceKey()
		require.NoError(t, err)
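		// piecePrivateKey2 signs order3 below; its signature will not verify
		// against piecePublicKey1 embedded in the order limit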

		serialNumber1 := testrand.SerialNumber()
		key := satellite.Config.Orders.EncryptionKeys.Default
		encrypted, err := key.EncryptMetadata(
			serialNumber1,
			&internalpb.OrderLimitMetadata{
				CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
			},
		)
		require.NoError(t, err)

		limit := pb.OrderLimit{
			SerialNumber:           serialNumber1,
			SatelliteId:            satellite.ID(),
			UplinkPublicKey:        piecePublicKey1,
			StorageNodeId:          storagenode.ID(),
			PieceId:                storj.NewPieceID(),
			Action:                 pb.PieceAction_PUT,
			Limit:                  1000,
			PieceExpiration:        time.Time{},
			OrderCreation:          now,
			OrderExpiration:        now.Add(24 * time.Hour),
			EncryptedMetadataKeyId: key.ID[:],
			EncryptedMetadata:      encrypted,
		}
		orderLimit1, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), &limit)
		require.NoError(t, err)

		order1, err := signing.SignUplinkOrder(ctx, piecePrivateKey1, &pb.Order{
			SerialNumber: serialNumber1,
			Amount:       int64(50),
		})
		require.NoError(t, err)

		serialNumber2 := testrand.SerialNumber()
		order2, err := signing.SignUplinkOrder(ctx, piecePrivateKey1, &pb.Order{
			SerialNumber: serialNumber2,
			Amount:       int64(50),
		})
		require.NoError(t, err)

		order3, err := signing.SignUplinkOrder(ctx, piecePrivateKey2, &pb.Order{
			SerialNumber: serialNumber2,
			Amount:       int64(50),
		})
		require.NoError(t, err)

		testCases := []struct {
			name       string
			order      *pb.Order
			orderLimit *pb.OrderLimit
		}{
			{"no order", nil, orderLimit1},
			{"no order limit", order1, nil},
			{"mismatch serial number", order2, orderLimit1},
			{"mismatch uplink signature", order3, orderLimit1},
		}
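		// every malformed request (missing order, missing limit, serial
		// mismatch, bad signature) must be rejected without crediting any bandwidth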

		for _, tt := range testCases {
			tt := tt
			t.Run(tt.name, func(t *testing.T) {
				conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
				require.NoError(t, err)
				defer ctx.Check(conn.Close)

				stream, err := pb.NewDRPCOrdersClient(conn).SettlementWithWindow(ctx)
				require.NoError(t, err)
				defer ctx.Check(stream.Close)

				err = stream.Send(&pb.SettlementRequest{
					Limit: tt.orderLimit,
					Order: tt.order,
				})
				require.NoError(t, err)

				resp, err := stream.CloseAndRecv()
				require.NoError(t, err)
				require.Equal(t, &pb.SettlementWithWindowResponse{
					Status:        pb.SettlementWithWindowResponse_REJECTED,
					ActionSettled: nil,
				}, resp)

				// flush the chores
				satellite.Orders.Chore.Loop.TriggerWait()

				// assert no data was added to satellite storagenode or bucket bandwidth tables
				snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
				require.NoError(t, err)
				require.EqualValues(t, 0, snbw)

				newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
				require.NoError(t, err)
				require.EqualValues(t, 0, newBbw)
			})
		}
	})
}