// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
	"io"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metainfo/metabase"
	"storj.io/storj/satellite/orders"
)
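
// runTestWithPhases runs fn once for each value of the
// orders.window-endpoint-rollout-phase config flag. In phase1 the current
// orders endpoint keeps working as usual and the windowed endpoint uses the
// same backend; in phase2 the current endpoint is disabled while the windowed
// endpoint still uses the same backend; in phase3 the current endpoint stays
// disabled and the windowed endpoint switches to the new backend, which needs
// much less database traffic and state.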
func runTestWithPhases(t *testing.T, fn func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet)) {
	run := func(phase orders.WindowEndpointRolloutPhase) func(t *testing.T) {
		return func(t *testing.T) {
			testplanet.Run(t, testplanet.Config{
				SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
				Reconfigure: testplanet.Reconfigure{
					Satellite: func(_ *zap.Logger, _ int, config *satellite.Config) {
						config.Orders.WindowEndpointRolloutPhase = phase
					},
				},
			}, fn)
		}
	}

	t.Run("Phase1", run(orders.WindowEndpointRolloutPhase1))
	t.Run("Phase2", run(orders.WindowEndpointRolloutPhase2))
	t.Run("Phase3", run(orders.WindowEndpointRolloutPhase3))
}
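
// TestSettlementWithWindowEndpointManyOrders submits two signed orders for the
// same bucket over a single SettlementWithWindow stream and checks that the
// settled bandwidth recorded for the storage node and the bucket matches the
// amount expected by each test case.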
func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}
		key := satellite.Config.Orders.EncryptionKeys.Default

		// stop any async flushes because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()
		satellite.Accounting.ReportedRollup.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.Equal(t, int64(0), snbw)

		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.Equal(t, int64(0), bucketbw)

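		// Each case submits two 50-byte orders on one stream. In the valid case
		// both orders share a settlement window, so 100 bytes are expected to
		// settle; in the mismatch case one order limit was created 48 hours
		// earlier, so the orders fall into different windows and only 50 bytes
		// are expected to settle.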
		var testCases = []struct {
			name          string
			dataAmount    int64
			orderCreation time.Time
			settledAmt    int64
		}{
			{"settle 2 orders, valid", int64(50), now, int64(100)},
			{"settle 2 orders, window mismatch", int64(50), now.Add(-48 * time.Hour), int64(50)},
		}

		for _, tt := range testCases {
			func() {
				// create serial number to use in test. must be unique for each run.
				serialNumber1 := testrand.SerialNumber()
				encrypted1, err := key.EncryptMetadata(
					serialNumber1,
					&internalpb.OrderLimitMetadata{
						CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
					},
				)
				require.NoError(t, err)

				serialNumber2 := testrand.SerialNumber()
				encrypted2, err := key.EncryptMetadata(
					serialNumber2,
					&internalpb.OrderLimitMetadata{
						CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
					},
				)
				require.NoError(t, err)

				piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
				require.NoError(t, err)

				// create signed orderlimit or order to test with
				limit1 := &pb.OrderLimit{
					SerialNumber:           serialNumber1,
					SatelliteId:            satellite.ID(),
					UplinkPublicKey:        piecePublicKey,
					StorageNodeId:          storagenode.ID(),
					PieceId:                storj.NewPieceID(),
					Action:                 pb.PieceAction_PUT,
					Limit:                  1000,
					PieceExpiration:        time.Time{},
					OrderCreation:          tt.orderCreation,
					OrderExpiration:        now.Add(24 * time.Hour),
					EncryptedMetadataKeyId: key.ID[:],
					EncryptedMetadata:      encrypted1,
				}
				orderLimit1, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit1)
				require.NoError(t, err)

				order1, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
					SerialNumber: serialNumber1,
					Amount:       tt.dataAmount,
				})
				require.NoError(t, err)

				limit2 := &pb.OrderLimit{
					SerialNumber:           serialNumber2,
					SatelliteId:            satellite.ID(),
					UplinkPublicKey:        piecePublicKey,
					StorageNodeId:          storagenode.ID(),
					PieceId:                storj.NewPieceID(),
					Action:                 pb.PieceAction_PUT,
					Limit:                  1000,
					PieceExpiration:        time.Time{},
					OrderCreation:          now,
					OrderExpiration:        now.Add(24 * time.Hour),
					EncryptedMetadataKeyId: key.ID[:],
					EncryptedMetadata:      encrypted2,
				}
				orderLimit2, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit2)
				require.NoError(t, err)

				order2, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
					SerialNumber: serialNumber2,
					Amount:       tt.dataAmount,
				})
				require.NoError(t, err)

				// create connection between storagenode and satellite
				conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
				require.NoError(t, err)
				defer ctx.Check(conn.Close)

				stream, err := pb.NewDRPCOrdersClient(conn).SettlementWithWindow(ctx)
				require.NoError(t, err)
				defer ctx.Check(stream.Close)

				// storagenode settles an order and orderlimit
				err = stream.Send(&pb.SettlementRequest{
					Limit: orderLimit1,
					Order: order1,
				})
				require.NoError(t, err)
				err = stream.Send(&pb.SettlementRequest{
					Limit: orderLimit2,
					Order: order2,
				})
				require.NoError(t, err)
				resp, err := stream.CloseAndRecv()
				require.NoError(t, err)

				// the settled amount is only returned during phase3
				var settled map[int32]int64
				if satellite.Config.Orders.WindowEndpointRolloutPhase == orders.WindowEndpointRolloutPhase3 {
					settled = map[int32]int64{int32(pb.PieceAction_PUT): tt.settledAmt}
				}
				require.Equal(t, &pb.SettlementWithWindowResponse{
					Status:        pb.SettlementWithWindowResponse_ACCEPTED,
					ActionSettled: settled,
				}, resp)

				// trigger and wait for all of the chores necessary to flush the orders
				assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, tt.orderCreation))
				satellite.Orders.Chore.Loop.TriggerWait()

				// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
				snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, tt.orderCreation)
				require.NoError(t, err)
				require.EqualValues(t, tt.settledAmt, snbw)

				newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, tt.orderCreation)
				require.NoError(t, err)
				require.EqualValues(t, tt.settledAmt, newBbw)
			}()
		}
	})
}
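
// TestSettlementWithWindowEndpointSingleOrder settles the same serial number
// repeatedly over the windowed endpoint: an identical resubmission is expected
// to be accepted again, while a resubmission with a different amount is
// expected to be rejected once phase3 is active.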
func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		const dataAmount int64 = 50
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}
		key := satellite.Config.Orders.EncryptionKeys.Default

		// stop any async flushes because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()
		satellite.Accounting.ReportedRollup.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, snbw)

		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, bucketbw)

		// create serial number to use in test
		serialNumber := testrand.SerialNumber()
		encrypted, err := key.EncryptMetadata(
			serialNumber,
			&internalpb.OrderLimitMetadata{
				CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
			},
		)
		require.NoError(t, err)

		piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
		require.NoError(t, err)

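		// All three cases reuse the same serial number and window. The expected
		// statuses below apply in phase3; in phase1 and phase2 the endpoint is
		// expected to report ACCEPTED for every case (see the switch on the
		// rollout phase further down).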
		var testCases = []struct {
			name           string
			dataAmount     int64
			expectedStatus pb.SettlementWithWindowResponse_Status
		}{
			{"first settlement", dataAmount, pb.SettlementWithWindowResponse_ACCEPTED},
			{"settle the same a second time, matches first", dataAmount, pb.SettlementWithWindowResponse_ACCEPTED},
			{"settle a third time, doesn't match first", dataAmount + 1, pb.SettlementWithWindowResponse_REJECTED},
		}

		for _, tt := range testCases {
			func() {
				// create signed orderlimit or order to test with
				limit := &pb.OrderLimit{
					SerialNumber:           serialNumber,
					SatelliteId:            satellite.ID(),
					UplinkPublicKey:        piecePublicKey,
					StorageNodeId:          storagenode.ID(),
					PieceId:                storj.NewPieceID(),
					Action:                 pb.PieceAction_PUT,
					Limit:                  1000,
					PieceExpiration:        time.Time{},
					OrderCreation:          now,
					OrderExpiration:        now.Add(24 * time.Hour),
					EncryptedMetadataKeyId: key.ID[:],
					EncryptedMetadata:      encrypted,
				}
				orderLimit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
				require.NoError(t, err)

				order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
					SerialNumber: serialNumber,
					Amount:       tt.dataAmount,
				})
				require.NoError(t, err)

				// create connection between storagenode and satellite
				conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
				require.NoError(t, err)
				defer ctx.Check(conn.Close)

				stream, err := pb.NewDRPCOrdersClient(conn).SettlementWithWindow(ctx)
				require.NoError(t, err)
				defer ctx.Check(stream.Close)

				// storagenode settles an order and orderlimit
				err = stream.Send(&pb.SettlementRequest{
					Limit: orderLimit,
					Order: order,
				})
				require.NoError(t, err)
				resp, err := stream.CloseAndRecv()
				require.NoError(t, err)

				expected := new(pb.SettlementWithWindowResponse)
				switch {
				case satellite.Config.Orders.WindowEndpointRolloutPhase != orders.WindowEndpointRolloutPhase3:
					expected.Status = pb.SettlementWithWindowResponse_ACCEPTED
					expected.ActionSettled = nil
				case tt.expectedStatus == pb.SettlementWithWindowResponse_ACCEPTED:
					expected.Status = pb.SettlementWithWindowResponse_ACCEPTED
					expected.ActionSettled = map[int32]int64{int32(pb.PieceAction_PUT): tt.dataAmount}
				default:
					expected.Status = pb.SettlementWithWindowResponse_REJECTED
					expected.ActionSettled = nil
				}
				require.Equal(t, expected, resp)

				// flush all the chores
				assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, now))
				satellite.Orders.Chore.Loop.TriggerWait()

				// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
				snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
				require.NoError(t, err)
				require.Equal(t, dataAmount, snbw)

				newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
				require.NoError(t, err)
				require.Equal(t, dataAmount, newBbw)
			}()
		}
	})
}
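
// TestSettlementWithWindowEndpointErrors verifies that malformed settlement
// requests (a missing order, a missing order limit, a mismatched serial
// number, or an order signed with the wrong uplink key) are rejected in every
// rollout phase and that no bandwidth is recorded for them.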
func TestSettlementWithWindowEndpointErrors(t *testing.T) {
	runTestWithPhases(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}

		// stop any async flushes because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()
		satellite.Accounting.ReportedRollup.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, snbw)

		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, bucketbw)

		// create serial number to use in test
		serialNumber1 := testrand.SerialNumber()
		err = ordersDB.CreateSerialInfo(ctx, serialNumber1, []byte(bucketLocation.Prefix()), now.AddDate(1, 0, 10))
		require.NoError(t, err)

		serialNumber2 := testrand.SerialNumber()
		err = ordersDB.CreateSerialInfo(ctx, serialNumber2, []byte(bucketLocation.Prefix()), now.AddDate(1, 0, 10))
		require.NoError(t, err)

		piecePublicKey1, piecePrivateKey1, err := storj.NewPieceKey()
		require.NoError(t, err)

		_, piecePrivateKey2, err := storj.NewPieceKey()
		require.NoError(t, err)

		key := satellite.Config.Orders.EncryptionKeys.Default
		encrypted, err := key.EncryptMetadata(
			serialNumber1,
			&internalpb.OrderLimitMetadata{
				CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
			},
		)
		require.NoError(t, err)

		limit := pb.OrderLimit{
			SerialNumber:           serialNumber1,
			SatelliteId:            satellite.ID(),
			UplinkPublicKey:        piecePublicKey1,
			StorageNodeId:          storagenode.ID(),
			PieceId:                storj.NewPieceID(),
			Action:                 pb.PieceAction_PUT,
			Limit:                  1000,
			PieceExpiration:        time.Time{},
			OrderCreation:          now,
			OrderExpiration:        now.Add(24 * time.Hour),
			EncryptedMetadataKeyId: key.ID[:],
			EncryptedMetadata:      encrypted,
		}
		orderLimit1, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), &limit)
		require.NoError(t, err)

		order1, err := signing.SignUplinkOrder(ctx, piecePrivateKey1, &pb.Order{
			SerialNumber: serialNumber1,
			Amount:       int64(50),
		})
		require.NoError(t, err)

		order2, err := signing.SignUplinkOrder(ctx, piecePrivateKey1, &pb.Order{
			SerialNumber: serialNumber2,
			Amount:       int64(50),
		})
		require.NoError(t, err)

		order3, err := signing.SignUplinkOrder(ctx, piecePrivateKey2, &pb.Order{
			SerialNumber: serialNumber2,
			Amount:       int64(50),
		})
		require.NoError(t, err)

		var testCases = []struct {
			name       string
			order      *pb.Order
			orderLimit *pb.OrderLimit
		}{
			{"no order", nil, orderLimit1},
			{"no order limit", order1, nil},
			{"mismatch serial number", order2, orderLimit1},
			{"mismatch uplink signature", order3, orderLimit1},
		}

		for _, tt := range testCases {
			tt := tt
			t.Run(tt.name, func(t *testing.T) {
				conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
				require.NoError(t, err)
				defer ctx.Check(conn.Close)

				stream, err := pb.NewDRPCOrdersClient(conn).SettlementWithWindow(ctx)
				require.NoError(t, err)
				defer ctx.Check(stream.Close)

				err = stream.Send(&pb.SettlementRequest{
					Limit: tt.orderLimit,
					Order: tt.order,
				})
				require.NoError(t, err)

				resp, err := stream.CloseAndRecv()
				require.NoError(t, err)
				require.Equal(t, &pb.SettlementWithWindowResponse{
					Status:        pb.SettlementWithWindowResponse_REJECTED,
					ActionSettled: nil,
				}, resp)

				// flush all the chores
				assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, now))
				satellite.Orders.Chore.Loop.TriggerWait()

				// assert no data was added to satellite storagenode or bucket bandwidth tables
				snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
				require.NoError(t, err)
				require.EqualValues(t, 0, snbw)

				newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
				require.NoError(t, err)
				require.EqualValues(t, 0, newBbw)
			})
		}
	})
}
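
// TestSettlementEndpointSingleOrder exercises the legacy (non-windowed)
// Settlement endpoint in every rollout phase: in phase1 it still accepts and
// settles orders, while in phase2 and phase3 it is disabled and responds with
// rpcstatus.Unavailable.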
func TestSettlementEndpointSingleOrder(t *testing.T) {
	runTestWithPhases(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		const dataAmount int64 = 50
		satellite := planet.Satellites[0]
		ordersDB := satellite.Orders.DB
		storagenode := planet.StorageNodes[0]
		now := time.Now()
		projectID := testrand.UUID()
		bucketname := "testbucket"
		bucketLocation := metabase.BucketLocation{
			ProjectID:  projectID,
			BucketName: bucketname,
		}

		// stop any async flushes because we want to be sure when some values are
		// written to avoid races
		satellite.Orders.Chore.Loop.Pause()
		satellite.Accounting.ReportedRollup.Loop.Pause()

		// confirm storagenode and bucket bandwidth tables start empty
		snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, snbw)

		bucketbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.EqualValues(t, 0, bucketbw)

		// create serial number to use in test
		serialNumber := testrand.SerialNumber()
		err = ordersDB.CreateSerialInfo(ctx, serialNumber, []byte(bucketLocation.Prefix()), now.AddDate(1, 0, 10))
		require.NoError(t, err)

		piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
		require.NoError(t, err)

		key := satellite.Config.Orders.EncryptionKeys.Default
		encrypted, err := key.EncryptMetadata(
			serialNumber,
			&internalpb.OrderLimitMetadata{
				CompactProjectBucketPrefix: bucketLocation.CompactPrefix(),
			},
		)
		require.NoError(t, err)

		// create signed orderlimit or order to test with
		limit := &pb.OrderLimit{
			SerialNumber:           serialNumber,
			SatelliteId:            satellite.ID(),
			UplinkPublicKey:        piecePublicKey,
			StorageNodeId:          storagenode.ID(),
			PieceId:                storj.NewPieceID(),
			Action:                 pb.PieceAction_PUT,
			Limit:                  1000,
			PieceExpiration:        time.Time{},
			OrderCreation:          now,
			OrderExpiration:        now.Add(24 * time.Hour),
			EncryptedMetadataKeyId: key.ID[:],
			EncryptedMetadata:      encrypted,
		}
		orderLimit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
		require.NoError(t, err)

		order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
			SerialNumber: serialNumber,
			Amount:       dataAmount,
		})
		require.NoError(t, err)

		// create connection between storagenode and satellite
		conn, err := storagenode.Dialer.DialNodeURL(ctx, storj.NodeURL{ID: satellite.ID(), Address: satellite.Addr()})
		require.NoError(t, err)
		defer ctx.Check(conn.Close)

		stream, err := pb.NewDRPCOrdersClient(conn).Settlement(ctx)
		require.NoError(t, err)
		defer ctx.Check(stream.Close)

		// storagenode settles an order and orderlimit
		var resp *pb.SettlementResponse
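		// In phase1 the legacy endpoint still accepts settlements; in phase2 and
		// phase3 it is disabled, so the request is expected to fail (see the else
		// branch below).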
		if satellite.Config.Orders.WindowEndpointRolloutPhase == orders.WindowEndpointRolloutPhase1 {
			err = stream.Send(&pb.SettlementRequest{
				Limit: orderLimit,
				Order: order,
			})
			require.NoError(t, err)
			require.NoError(t, stream.CloseSend())

			resp, err = stream.Recv()
			require.NoError(t, err)
		} else {
			// in phase2 and phase3, the endpoint is disabled. depending on how fast the
			// server sends that error message, we may see an io.EOF on the Send call, or
			// we may see no error at all. In either case, we have to call stream.Recv to
			// see the actual error. gRPC semantics are funky.
			err = stream.Send(&pb.SettlementRequest{
				Limit: orderLimit,
				Order: order,
			})
			if err != io.EOF {
				require.NoError(t, err)
			}
			require.NoError(t, stream.CloseSend())

			_, err = stream.Recv()
			require.Error(t, err)
			require.Equal(t, rpcstatus.Unavailable, rpcstatus.Code(err))
			return
		}

		require.Equal(t, &pb.SettlementResponse{
			SerialNumber: serialNumber,
			Status:       pb.SettlementResponse_ACCEPTED,
		}, resp)

		// flush all the chores
		assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, now))
		satellite.Orders.Chore.Loop.TriggerWait()

		// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
		snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
		require.NoError(t, err)
		require.Equal(t, dataAmount, snbw)

		newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
		require.NoError(t, err)
		require.Equal(t, dataAmount, newBbw)
	})
}