storj/satellite/orders/service_test.go
Márton Elek 97a89c3476 satellite: switch to use nodefilters instead of old placement.AllowedCountry
placement.AllowedCountry is the old way to specify placement. The new approach uses a more generic (dynamic) method, which can check the full node information instead of just the country code.

90% of this patch is just search and replace:

 * we need to use NodeFilters instead of placement.AllowedCountry
 * which means we need an initialized PlacementRules available everywhere
 * which means we need to configure the placement rules

The remaining 10% is placement.go, where we introduce a new type of configuration (a lightweight expression language) to define any kind of placement without a code change.
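
For illustration, a node filter can be thought of as a predicate over the whole nodeselection.SelectedNode record rather than a bare country-code list. A minimal, hypothetical sketch (the countryFilter type and its Match method below are illustrative only, not the real nodeselection API, which may differ):

	package nodefiltersketch

	import (
		"storj.io/common/storj/location"
		"storj.io/storj/satellite/nodeselection"
	)

	// countryFilter reproduces the old AllowedCountry behaviour as a predicate
	// over the full node record; the same shape could also inspect the last
	// net, tags, or any other node attribute.
	type countryFilter struct {
		allowed map[location.CountryCode]bool
	}

	// Match reports whether the node's country is in the allowed set.
	func (f countryFilter) Match(node *nodeselection.SelectedNode) bool {
		return f.allowed[node.CountryCode]
	}

The test below only exercises the default rules: overlay.NewPlacementRules().CreateFilters is handed to orders.NewService, so effectively no placement constraint is applied.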

Change-Id: Ie644b0b1840871b0e6bbcf80c6b50a947503d7df
2023-07-07 16:55:45 +00:00


// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/common/identity/testidentity"
	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/overlay"
)

func TestGetOrderLimits(t *testing.T) {
	ctx := testcontext.New(t)
	ctrl := gomock.NewController(t)

	bucket := metabase.BucketLocation{ProjectID: testrand.UUID(), BucketName: "bucket1"}

	// Eight online nodes, with one piece stored on each of them.
	pieces := metabase.Pieces{}
	nodes := map[storj.NodeID]*nodeselection.SelectedNode{}
	for i := 0; i < 8; i++ {
		nodeID := testrand.NodeID()
		nodes[nodeID] = &nodeselection.SelectedNode{
			ID: nodeID,
			Address: &pb.NodeAddress{
				Address: fmt.Sprintf("host%d.com", i),
			},
		}

		pieces = append(pieces, metabase.Piece{
			Number:      uint16(i),
			StorageNode: nodeID,
		})
	}

	testIdentity, err := testidentity.PregeneratedIdentity(0, storj.LatestIDVersion())
	require.NoError(t, err)
	k := signing.SignerFromFullIdentity(testIdentity)

	overlayService := orders.NewMockOverlayForOrders(ctrl)
	overlayService.
		EXPECT().
		CachedGetOnlineNodesForGet(gomock.Any(), gomock.Any()).
		Return(nodes, nil).AnyTimes()

	service, err := orders.NewService(zaptest.NewLogger(t), k, overlayService, orders.NewNoopDB(),
		overlay.NewPlacementRules().CreateFilters,
		orders.Config{
			EncryptionKeys: orders.EncryptionKeys{
				Default: orders.EncryptionKey{
					ID:  orders.EncryptionKeyID{1, 2, 3, 4, 5, 6, 7, 8},
					Key: testrand.Key(),
				},
			},
		})
	require.NoError(t, err)

	segment := metabase.Segment{
		StreamID:  testrand.UUID(),
		CreatedAt: time.Now(),
		Redundancy: storj.RedundancyScheme{
			Algorithm:      storj.ReedSolomon,
			ShareSize:      256,
			RequiredShares: 4,
			RepairShares:   5,
			OptimalShares:  6,
			TotalShares:    10,
		},
		Pieces:       pieces,
		EncryptedKey: []byte{1, 2, 3, 4},
		RootPieceID:  testrand.PieceID(),
	}

	// checkExpectedLimits requests order limits for the segment and verifies
	// how many of the returned limits are actually populated.
	checkExpectedLimits := func(requested int32, received int) {
		limits, _, err := service.CreateGetOrderLimits(ctx, bucket, segment, requested, 0)
		require.NoError(t, err)

		realLimits := 0
		for _, limit := range limits {
			if limit.Limit != nil {
				realLimits++
			}
		}
		require.Equal(t, received, realLimits)
	}

	t.Run("Do not request any specific number", func(t *testing.T) {
		checkExpectedLimits(0, 6)
	})

	t.Run("Request less than the optimal", func(t *testing.T) {
		checkExpectedLimits(2, 6)
	})

	t.Run("Request more than the optimal", func(t *testing.T) {
		checkExpectedLimits(8, 8)
	})

	t.Run("Request more than the replication", func(t *testing.T) {
		checkExpectedLimits(1000, 8)
	})
}