satellite: switch to using nodefilters instead of the old placement.AllowedCountry

placement.AllowedCountry is the old way to specify placement. The new approach uses a more generic (dynamic) method, which can check full node information instead of just the country code.

90% of this patch is just search and replace:

 * we need to use NodeFilters instead of placement.AllowedCountry
 * which means we need an initialized PlacementRules available everywhere
 * which means we need to configure the placement rules (a minimal sketch of the wiring follows this list)
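A minimal sketch of that wiring, assuming PlacementRules is the function type implied by ConfigurablePlacementRule.CreateFilters in the diff below; log, db, and config stand for whatever the caller already has:

    // The overlay package's PlacementRules is, as implied by CreateFilters, a function
    // from a placement constraint to the node filters that enforce it:
    //
    //     type PlacementRules func(storj.PlacementConstraint) nodeselection.NodeFilters
    //
    // Every service that previously called placement.AllowedCountry now takes such a
    // function as a constructor argument, mirroring the NewService call sites below:
    rules := overlay.NewPlacementRules()
    rules.AddLegacyStaticRules()
    overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(),
        rules.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)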

The remaining 10% is placement.go, where we introduced a new type of configuration (a lightweight expression language) to define any kind of placement without code changes.
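For example, a configuration like the following (the placement IDs are illustrative and the node ID is the one from the unit test below; the value uses the same syntax parsed by AddPlacementFromString and described by the new placement help text):

    placement: '11:country("GB");16:tag("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4","certified","true")'

makes placement 11 admit only nodes located in GB, and placement 16 admit only nodes carrying a "certified=true" tag signed by that node ID; the help text suggests using IDs above 10 so the legacy static placements are not overridden.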

Change-Id: Ie644b0b1840871b0e6bbcf80c6b50a947503d7df
Márton Elek 2023-07-06 14:35:26 +02:00 committed by Storj Robot
parent e0b5476e78
commit 97a89c3476
32 changed files with 427 additions and 94 deletions


@ -94,7 +94,7 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
dialer := rpc.NewDefaultDialer(tlsOptions)
overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return err
}
@ -102,8 +102,9 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
orders, err := orders.NewService(
log.Named("orders"),
signing.SignerFromFullIdentity(identity),
overlay,
overlayService,
orders.NewNoopDB(),
config.Placement.CreateFilters,
config.Orders,
)
if err != nil {
@ -122,9 +123,10 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
log.Named("segment-repair"),
metabaseDB,
orders,
overlay,
overlayService,
nil, // TODO add noop version
ecRepairer,
config.Placement.CreateFilters,
config.Checker.RepairOverrides,
config.Repairer,
)
@ -132,7 +134,7 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
// TODO reorganize to avoid using peer.
peer := &satellite.Repairer{}
peer.Overlay = overlay
peer.Overlay = overlayService
peer.Orders.Service = orders
peer.EcRepairer = ecRepairer
peer.SegmentRepairer = segmentRepairer


@ -203,12 +203,12 @@ func verifySegments(cmd *cobra.Command, args []string) error {
dialer := rpc.NewDefaultDialer(tlsOptions)
// setup dependencies for verification
overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), "", "", satelliteCfg.Overlay)
overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), overlay.NewPlacementRules().CreateFilters, "", "", satelliteCfg.Overlay)
if err != nil {
return Error.Wrap(err)
}
ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlay, orders.NewNoopDB(), satelliteCfg.Orders)
ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlayService, orders.NewNoopDB(), overlay.NewPlacementRules().CreateFilters, satelliteCfg.Orders)
if err != nil {
return Error.Wrap(err)
}
@ -243,7 +243,7 @@ func verifySegments(cmd *cobra.Command, args []string) error {
// setup verifier
verifier := NewVerifier(log.Named("verifier"), dialer, ordersService, verifyConfig)
service, err := NewService(log.Named("service"), metabaseDB, verifier, overlay, serviceConfig)
service, err := NewService(log.Named("service"), metabaseDB, verifier, overlayService, serviceConfig)
if err != nil {
return Error.Wrap(err)
}

go.mod

@ -22,6 +22,7 @@ require (
github.com/jackc/pgx/v5 v5.3.1
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6
github.com/loov/hrtime v1.0.3
github.com/mattn/go-sqlite3 v1.14.12

go.sum

@ -324,6 +324,8 @@ github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b h1:HKvXTXZTeUHXRibg2ilZlkGSQP6A3cs0zXrBd4xMi6M=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b/go.mod h1:Mrym6OnPMkBKvN8/uXSkyhFSh6ndKKYE+Q4kxCfQ4V0=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=


@ -746,7 +746,6 @@ func (planet *Planet) newRangedLoop(ctx context.Context, index int, db satellite
prefix := "satellite-ranged-loop" + strconv.Itoa(index)
log := planet.log.Named(prefix)
return satellite.NewRangedLoop(log, db, metabaseDB, &config, nil)
}


@ -282,7 +282,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup overlay
peer.Overlay.DB = peer.DB.OverlayCache()
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
@ -387,6 +387,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
signing.SignerFromFullIdentity(peer.Identity),
peer.Overlay.Service,
peer.Orders.DB,
config.Placement.CreateFilters,
config.Orders,
)
if err != nil {


@ -141,7 +141,7 @@ func NewAuditor(log *zap.Logger, full *identity.FullIdentity,
{ // setup overlay
var err error
peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, nodeEvents, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, nodeEvents, config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
@ -183,6 +183,7 @@ func NewAuditor(log *zap.Logger, full *identity.FullIdentity,
// PUT and GET actions which are not used by
// auditor so we can set noop implementation.
orders.NewNoopDB(),
config.Placement.CreateFilters,
config.Orders,
)
if err != nil {


@ -244,7 +244,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup overlay
peer.Overlay.DB = peer.DB.OverlayCache()
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}


@ -31,6 +31,7 @@ import (
"storj.io/storj/satellite/metabase/rangedloop"
"storj.io/storj/satellite/metabase/rangedloop/rangedlooptest"
"storj.io/storj/satellite/metrics"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/repair/checker"
)
@ -426,6 +427,7 @@ func TestAllInOne(t *testing.T) {
satellite.DB.RepairQueue(),
satellite.Overlay.Service,
satellite.Config.Checker,
overlay.NewPlacementRules().CreateFilters,
[]string{},
),
})


@ -22,7 +22,9 @@ import (
"storj.io/common/errs2"
"storj.io/common/identity"
"storj.io/common/identity/testidentity"
"storj.io/common/memory"
"storj.io/common/nodetag"
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/signing"
@ -37,6 +39,9 @@ import (
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storagenode"
"storj.io/storj/storagenode/contact"
"storj.io/uplink"
"storj.io/uplink/private/metaclient"
"storj.io/uplink/private/object"
@ -2450,3 +2455,100 @@ func TestListUploads(t *testing.T) {
require.Equal(t, 1000, items)
})
}
func TestPlacements(t *testing.T) {
ctx := testcontext.New(t)
satelliteIdentity := signing.SignerFromFullIdentity(testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion()))
placementRules := overlay.ConfigurablePlacementRule{}
err := placementRules.Set(fmt.Sprintf(`16:tag("%s", "certified","true")`, satelliteIdentity.ID()))
require.NoError(t, err)
testplanet.Run(t,
testplanet.Config{
SatelliteCount: 1,
StorageNodeCount: 12,
UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.Min = 3
config.Metainfo.RS.Repair = 4
config.Metainfo.RS.Success = 5
config.Metainfo.RS.Total = 6
config.Metainfo.MaxInlineSegmentSize = 1
config.Placement = placementRules
},
StorageNode: func(index int, config *storagenode.Config) {
if index%2 == 0 {
tags := &pb.NodeTagSet{
NodeId: testidentity.MustPregeneratedSignedIdentity(index+1, storj.LatestIDVersion()).ID.Bytes(),
Timestamp: time.Now().Unix(),
Tags: []*pb.Tag{
{
Name: "certified",
Value: []byte("true"),
},
},
}
signed, err := nodetag.Sign(ctx, tags, satelliteIdentity)
require.NoError(t, err)
config.Contact.Tags = contact.SignedTags(pb.SignedNodeTagSets{
Tags: []*pb.SignedNodeTagSet{
signed,
},
})
}
},
},
},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
buckets := satellite.API.Buckets.Service
uplink := planet.Uplinks[0]
projectID := uplink.Projects[0].ID
// create buckets with different placement (placement 16 is configured above)
createGeofencedBucket(t, ctx, buckets, projectID, "constrained", 16)
objectNo := 10
for i := 0; i < objectNo; i++ {
// upload an object to one of the global buckets
err := uplink.Upload(ctx, satellite, "constrained", "testobject"+strconv.Itoa(i), make([]byte, 10240))
require.NoError(t, err)
}
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfoClient, err := uplink.DialMetainfo(ctx, satellite, apiKey)
require.NoError(t, err)
defer func() {
_ = metainfoClient.Close()
}()
objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
Bucket: []byte("constrained"),
})
require.NoError(t, err)
require.Len(t, objects, objectNo)
for _, listedObject := range objects {
o, err := metainfoClient.DownloadObject(ctx, metaclient.DownloadObjectParams{
Bucket: []byte("constrained"),
EncryptedObjectKey: listedObject.EncryptedObjectKey,
})
require.NoError(t, err)
for _, limit := range o.DownloadedSegments[0].Limits {
if limit != nil {
// starting from 2 (first identity used for satellite, SN with even index are fine)
for i := 2; i < 11; i += 2 {
require.NotEqual(t, testidentity.MustPregeneratedSignedIdentity(i, storj.LatestIDVersion()).ID, limit.Limit.StorageNodeId)
}
}
}
}
},
)
}


@ -54,10 +54,11 @@ type Overlay interface {
//
// architecture: Service
type Service struct {
log *zap.Logger
satellite signing.Signer
overlay Overlay
orders DB
log *zap.Logger
satellite signing.Signer
overlay Overlay
orders DB
placementRules overlay.PlacementRules
encryptionKeys EncryptionKeys
@ -70,17 +71,18 @@ type Service struct {
// NewService creates new service for creating order limits.
func NewService(
log *zap.Logger, satellite signing.Signer, overlay Overlay,
orders DB, config Config,
orders DB, placementRules overlay.PlacementRules, config Config,
) (*Service, error) {
if config.EncryptionKeys.Default.IsZero() {
return nil, Error.New("encryption keys must be specified to include encrypted metadata")
}
return &Service{
log: log,
satellite: satellite,
overlay: overlay,
orders: orders,
log: log,
satellite: satellite,
overlay: overlay,
orders: orders,
placementRules: placementRules,
encryptionKeys: config.EncryptionKeys,
@ -146,8 +148,9 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucket metabas
}
if segment.Placement != storj.EveryCountry {
filter := service.placementRules(segment.Placement)
for id, node := range nodes {
if !segment.Placement.AllowedCountry(node.CountryCode) {
if !filter.MatchInclude(node) {
delete(nodes, id)
}
}


@ -21,6 +21,7 @@ import (
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
)
func TestGetOrderLimits(t *testing.T) {
@ -55,14 +56,16 @@ func TestGetOrderLimits(t *testing.T) {
CachedGetOnlineNodesForGet(gomock.Any(), gomock.Any()).
Return(nodes, nil).AnyTimes()
service, err := orders.NewService(zaptest.NewLogger(t), k, overlayService, orders.NewNoopDB(), orders.Config{
EncryptionKeys: orders.EncryptionKeys{
Default: orders.EncryptionKey{
ID: orders.EncryptionKeyID{1, 2, 3, 4, 5, 6, 7, 8},
Key: testrand.Key(),
service, err := orders.NewService(zaptest.NewLogger(t), k, overlayService, orders.NewNoopDB(),
overlay.NewPlacementRules().CreateFilters,
orders.Config{
EncryptionKeys: orders.EncryptionKeys{
Default: orders.EncryptionKey{
ID: orders.EncryptionKeyID{1, 2, 3, 4, 5, 6, 7, 8},
Key: testrand.Key(),
},
},
},
})
})
require.NoError(t, err)
segment := metabase.Segment{


@ -355,7 +355,7 @@ func BenchmarkNodeSelection(b *testing.B) {
}
})
service, err := overlay.NewService(zap.NewNop(), overlaydb, db.NodeEvents(), "", "", overlay.Config{
service, err := overlay.NewService(zap.NewNop(), overlaydb, db.NodeEvents(), overlay.NewPlacementRules().CreateFilters, "", "", overlay.Config{
Node: nodeSelectionConfig,
NodeSelectionCache: overlay.UploadSelectionCacheConfig{
Staleness: time.Hour,


@ -36,15 +36,17 @@ type DownloadSelectionCache struct {
db DownloadSelectionDB
config DownloadSelectionCacheConfig
cache sync2.ReadCacheOf[*DownloadSelectionCacheState]
cache sync2.ReadCacheOf[*DownloadSelectionCacheState]
placementRules PlacementRules
}
// NewDownloadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to download data from.
func NewDownloadSelectionCache(log *zap.Logger, db DownloadSelectionDB, config DownloadSelectionCacheConfig) (*DownloadSelectionCache, error) {
func NewDownloadSelectionCache(log *zap.Logger, db DownloadSelectionDB, placementRules PlacementRules, config DownloadSelectionCacheConfig) (*DownloadSelectionCache, error) {
cache := &DownloadSelectionCache{
log: log,
db: db,
config: config,
log: log,
db: db,
placementRules: placementRules,
config: config,
}
return cache, cache.cache.Init(config.Staleness/2, config.Staleness, cache.read)
}
@ -85,7 +87,7 @@ func (cache *DownloadSelectionCache) GetNodeIPsFromPlacement(ctx context.Context
return nil, Error.Wrap(err)
}
return state.IPsFromPlacement(nodes, placement), nil
return state.FilteredIPs(nodes, cache.placementRules(placement)), nil
}
// GetNodes gets nodes by ID from the cache, and refreshes the cache if it is stale.
@ -141,11 +143,11 @@ func (state *DownloadSelectionCacheState) IPs(nodes []storj.NodeID) map[storj.No
return xs
}
// IPsFromPlacement returns node ip:port for nodes that are in state. Results are filtered out by placement.
func (state *DownloadSelectionCacheState) IPsFromPlacement(nodes []storj.NodeID, placement storj.PlacementConstraint) map[storj.NodeID]string {
// FilteredIPs returns node ip:port for nodes that are in state. Results are filtered out..
func (state *DownloadSelectionCacheState) FilteredIPs(nodes []storj.NodeID, filter nodeselection.NodeFilters) map[storj.NodeID]string {
xs := make(map[storj.NodeID]string, len(nodes))
for _, nodeID := range nodes {
if n, exists := state.byID[nodeID]; exists && placement.AllowedCountry(n.CountryCode) {
if n, exists := state.byID[nodeID]; exists && filter.MatchInclude(n) {
xs[nodeID] = n.LastIPPort
}
}


@ -31,6 +31,7 @@ func TestDownloadSelectionCacheState_Refresh(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(),
db.OverlayCache(),
overlay.NewPlacementRules().CreateFilters,
downloadSelectionCacheConfig,
)
require.NoError(t, err)
@ -63,6 +64,7 @@ func TestDownloadSelectionCacheState_GetNodeIPs(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(),
db.OverlayCache(),
overlay.NewPlacementRules().CreateFilters,
downloadSelectionCacheConfig,
)
require.NoError(t, err)
@ -114,6 +116,7 @@ func TestDownloadSelectionCache_GetNodes(t *testing.T) {
// create new cache and select nodes
cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(),
db.OverlayCache(),
overlay.NewPlacementRules().CreateFilters,
overlay.DownloadSelectionCacheConfig{
Staleness: time.Hour,
OnlineWindow: time.Hour,


@ -4,6 +4,14 @@
package overlay
import (
"fmt"
"strconv"
"strings"
"github.com/jtolio/mito"
"github.com/spf13/pflag"
"github.com/zeebo/errs"
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/storj/satellite/nodeselection"
@ -17,6 +25,35 @@ type ConfigurablePlacementRule struct {
placements map[storj.PlacementConstraint]nodeselection.NodeFilters
}
// String implements pflag.Value.
func (d *ConfigurablePlacementRule) String() string {
parts := []string{}
for id, filter := range d.placements {
// we can hide the internal rules...
if id > 9 {
// TODO: we need proper String implementation for all the used filters
parts = append(parts, fmt.Sprintf("%d:%s", id, filter))
}
}
return strings.Join(parts, ";")
}
// Set implements pflag.Value.
func (d *ConfigurablePlacementRule) Set(s string) error {
if d.placements == nil {
d.placements = make(map[storj.PlacementConstraint]nodeselection.NodeFilters)
}
d.AddLegacyStaticRules()
return d.AddPlacementFromString(s)
}
// Type implements pflag.Value.
func (d *ConfigurablePlacementRule) Type() string {
return "placement-rule"
}
var _ pflag.Value = &ConfigurablePlacementRule{}
// NewPlacementRules creates a fully initialized NewPlacementRules.
func NewPlacementRules() *ConfigurablePlacementRule {
return &ConfigurablePlacementRule{
@ -63,6 +100,70 @@ func (d *ConfigurablePlacementRule) AddPlacementRule(id storj.PlacementConstrain
d.placements[id] = filters
}
// AddPlacementFromString parses placement definition form string representations from id:definition;id:definition;...
func (d *ConfigurablePlacementRule) AddPlacementFromString(definitions string) error {
env := map[any]any{
"country": func(countries ...string) (nodeselection.NodeFilters, error) {
countryCodes := make([]location.CountryCode, len(countries))
for i, country := range countries {
countryCodes[i] = location.ToCountryCode(country)
}
return nodeselection.NodeFilters{}.WithCountryFilter(func(code location.CountryCode) bool {
for _, expectedCode := range countryCodes {
if code == expectedCode {
return true
}
}
return false
}), nil
},
"all": func(filters ...nodeselection.NodeFilters) (nodeselection.NodeFilters, error) {
res := nodeselection.NodeFilters{}
for _, filter := range filters {
res = append(res, filter...)
}
return res, nil
},
"tag": func(nodeIDstr string, key string, value any) (nodeselection.NodeFilters, error) {
nodeID, err := storj.NodeIDFromString(nodeIDstr)
if err != nil {
return nil, err
}
var rawValue []byte
switch v := value.(type) {
case string:
rawValue = []byte(v)
case []byte:
rawValue = v
default:
return nil, errs.New("3rd argument of tag() should be string or []byte")
}
res := nodeselection.NodeFilters{
nodeselection.NewTagFilter(nodeID, key, rawValue),
}
return res, nil
},
}
for _, definition := range strings.Split(definitions, ";") {
definition = strings.TrimSpace(definition)
if definition == "" {
continue
}
idDef := strings.SplitN(definition, ":", 2)
val, err := mito.Eval(idDef[1], env)
if err != nil {
return errs.Wrap(err)
}
id, err := strconv.Atoi(idDef[0])
if err != nil {
return errs.Wrap(err)
}
d.placements[storj.PlacementConstraint(id)] = val.(nodeselection.NodeFilters)
}
return nil
}
// CreateFilters implements PlacementCondition.
func (d *ConfigurablePlacementRule) CreateFilters(constraint storj.PlacementConstraint) (filter nodeselection.NodeFilters) {
if constraint == 0 {
@ -73,13 +174,3 @@ func (d *ConfigurablePlacementRule) CreateFilters(constraint storj.PlacementCons
}
return nodeselection.ExcludeAll
}
// CreateDefaultPlacementRules returns with a default set of configured placement rules.
func CreateDefaultPlacementRules(satelliteID storj.NodeID) PlacementRules {
placement := NewPlacementRules()
placement.AddLegacyStaticRules()
placement.AddPlacementRule(10, nodeselection.NodeFilters{
nodeselection.NewTagFilter(satelliteID, "selection", []byte("true")),
})
return placement.CreateFilters
}


@ -0,0 +1,110 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package overlay
import (
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/storj/satellite/nodeselection"
)
func TestPlacementFromString(t *testing.T) {
signer, err := storj.NodeIDFromString("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4")
require.NoError(t, err)
t.Run("single country", func(t *testing.T) {
p := NewPlacementRules()
err := p.AddPlacementFromString(`11:country("GB")`)
require.NoError(t, err)
filters := p.placements[storj.PlacementConstraint(11)]
require.NotNil(t, filters)
require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.UnitedKingdom,
}))
require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.Germany,
}))
})
t.Run("tag rule", func(t *testing.T) {
p := NewPlacementRules()
err := p.AddPlacementFromString(`11:tag("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4","foo","bar")`)
require.NoError(t, err)
filters := p.placements[storj.PlacementConstraint(11)]
require.NotNil(t, filters)
require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
Tags: nodeselection.NodeTags{
{
Signer: signer,
Name: "foo",
Value: []byte("bar"),
},
},
}))
require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.Germany,
}))
})
t.Run("all rules", func(t *testing.T) {
p := NewPlacementRules()
err := p.AddPlacementFromString(`11:all(country("GB"),tag("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4","foo","bar"))`)
require.NoError(t, err)
filters := p.placements[storj.PlacementConstraint(11)]
require.NotNil(t, filters)
require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.UnitedKingdom,
Tags: nodeselection.NodeTags{
{
Signer: signer,
Name: "foo",
Value: []byte("bar"),
},
},
}))
require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.UnitedKingdom,
}))
require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.Germany,
Tags: nodeselection.NodeTags{
{
Signer: signer,
Name: "foo",
Value: []byte("bar"),
},
},
}))
})
t.Run("multi rule", func(t *testing.T) {
p := NewPlacementRules()
err := p.AddPlacementFromString(`11:country("GB");12:country("DE")`)
require.NoError(t, err)
filters := p.placements[storj.PlacementConstraint(11)]
require.NotNil(t, filters)
require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.UnitedKingdom,
}))
require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.Germany,
}))
filters = p.placements[storj.PlacementConstraint(12)]
require.NotNil(t, filters)
require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.UnitedKingdom,
}))
require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
CountryCode: location.Germany,
}))
})
}


@ -304,13 +304,14 @@ type Service struct {
UploadSelectionCache *UploadSelectionCache
DownloadSelectionCache *DownloadSelectionCache
LastNetFunc LastNetFunc
placementRules PlacementRules
}
// LastNetFunc is the type of a function that will be used to derive a network from an ip and port.
type LastNetFunc func(config NodeSelectionConfig, ip net.IP, port string) (string, error)
// NewService returns a new Service.
func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr, satelliteName string, config Config) (*Service, error) {
func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, placementRules PlacementRules, satelliteAddr, satelliteName string, config Config) (*Service, error) {
err := config.Node.AsOfSystemTime.isValid()
if err != nil {
return nil, errs.Wrap(err)
@ -337,21 +338,20 @@ func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr,
})
}
// TODO: this supposed to be configurable
placementRules := NewPlacementRules()
placementRules.AddLegacyStaticRules()
uploadSelectionCache, err := NewUploadSelectionCache(log, db,
config.NodeSelectionCache.Staleness, config.Node,
defaultSelection, placementRules.CreateFilters,
defaultSelection, placementRules,
)
if err != nil {
return nil, errs.Wrap(err)
}
downloadSelectionCache, err := NewDownloadSelectionCache(log, db, DownloadSelectionCacheConfig{
Staleness: config.NodeSelectionCache.Staleness,
OnlineWindow: config.Node.OnlineWindow,
AsOfSystemTime: config.Node.AsOfSystemTime,
})
downloadSelectionCache, err := NewDownloadSelectionCache(log, db,
placementRules,
DownloadSelectionCacheConfig{
Staleness: config.NodeSelectionCache.Staleness,
OnlineWindow: config.Node.OnlineWindow,
AsOfSystemTime: config.Node.AsOfSystemTime,
})
if err != nil {
return nil, errs.Wrap(err)
}
@ -369,6 +369,8 @@ func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr,
UploadSelectionCache: uploadSelectionCache,
DownloadSelectionCache: downloadSelectionCache,
LastNetFunc: MaskOffLastNet,
placementRules: placementRules,
}, nil
}


@ -74,7 +74,7 @@ func testCache(ctx *testcontext.Context, t *testing.T, store overlay.DB, nodeEve
serviceCtx, serviceCancel := context.WithCancel(ctx)
defer serviceCancel()
service, err := overlay.NewService(zaptest.NewLogger(t), store, nodeEvents, "", "", serviceConfig)
service, err := overlay.NewService(zaptest.NewLogger(t), store, nodeEvents, overlay.NewPlacementRules().CreateFilters, "", "", serviceConfig)
require.NoError(t, err)
ctx.Go(func() error { return service.Run(serviceCtx) })
defer ctx.Check(service.Close)


@ -162,6 +162,8 @@ type Config struct {
Server server.Config
Debug debug.Config
Placement overlay.ConfigurablePlacementRule `help:"detailed placement rules in the form 'id:definition;id:definition;...' where id is a 16 bytes integer (use >10 for backward compatibility), definition is a combination of the following functions:country(2 letter country codes,...), tag(nodeId, key, bytes(value)) all(...,...)."`
Admin admin.Config
Contact contact.Config


@ -139,7 +139,7 @@ func NewRangedLoop(log *zap.Logger, db DB, metabaseDB *metabase.DB, config *Conf
}
{ // setup overlay
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.DB.OverlayCache(), peer.DB.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.DB.OverlayCache(), peer.DB.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
@ -156,6 +156,7 @@ func NewRangedLoop(log *zap.Logger, db DB, metabaseDB *metabase.DB, config *Conf
peer.DB.RepairQueue(),
peer.Overlay.Service,
config.Checker,
config.Placement.CreateFilters,
config.Overlay.RepairExcludedCountryCodes,
)
}


@ -52,12 +52,12 @@ type Observer struct {
// NewObserver creates new checker observer instance.
// TODO move excludedCountries into config but share it somehow with segment repairer.
func NewObserver(logger *zap.Logger, repairQueue queue.RepairQueue, overlay *overlay.Service, config Config, excludedCountries []string) *Observer {
func NewObserver(logger *zap.Logger, repairQueue queue.RepairQueue, overlay *overlay.Service, config Config, placementRules overlay.PlacementRules, excludedCountries []string) *Observer {
return &Observer{
logger: logger,
repairQueue: repairQueue,
nodesCache: NewReliabilityCache(overlay, config.ReliabilityCacheStaleness, excludedCountries),
nodesCache: NewReliabilityCache(overlay, config.ReliabilityCacheStaleness, placementRules, excludedCountries),
overlayService: overlay,
repairOverrides: config.RepairOverrides.GetMap(),
nodeFailureRate: config.NodeFailureRate,


@ -555,7 +555,7 @@ func BenchmarkRemoteSegment(b *testing.B) {
}
observer := checker.NewObserver(zap.NewNop(), planet.Satellites[0].DB.RepairQueue(),
planet.Satellites[0].Auditor.Overlay, planet.Satellites[0].Config.Checker, []string{})
planet.Satellites[0].Auditor.Overlay, planet.Satellites[0].Config.Checker, overlay.NewPlacementRules().CreateFilters, []string{})
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(b, err)


@ -12,6 +12,7 @@ import (
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
)
@ -26,22 +27,18 @@ type ReliabilityCache struct {
excludedCountryCodes map[location.CountryCode]struct{}
mu sync.Mutex
state atomic.Value // contains immutable *reliabilityState
}
type reliableNode struct {
LastNet string
CountryCode location.CountryCode
placementRules overlay.PlacementRules
}
// reliabilityState.
type reliabilityState struct {
reliableOnline map[storj.NodeID]reliableNode
reliableAll map[storj.NodeID]reliableNode
reliableOnline map[storj.NodeID]nodeselection.SelectedNode
reliableAll map[storj.NodeID]nodeselection.SelectedNode
created time.Time
}
// NewReliabilityCache creates a new reliability checking cache.
func NewReliabilityCache(overlay *overlay.Service, staleness time.Duration, excludedCountries []string) *ReliabilityCache {
func NewReliabilityCache(overlay *overlay.Service, staleness time.Duration, placementRules overlay.PlacementRules, excludedCountries []string) *ReliabilityCache {
excludedCountryCodes := make(map[location.CountryCode]struct{})
for _, countryCode := range excludedCountries {
if cc := location.ToCountryCode(countryCode); cc != location.None {
@ -52,6 +49,7 @@ func NewReliabilityCache(overlay *overlay.Service, staleness time.Duration, excl
return &ReliabilityCache{
overlay: overlay,
staleness: staleness,
placementRules: placementRules,
excludedCountryCodes: excludedCountryCodes,
}
}
@ -109,9 +107,9 @@ func (cache *ReliabilityCache) OutOfPlacementPieces(ctx context.Context, created
return nil, err
}
var outOfPlacementPieces metabase.Pieces
nodeFilters := cache.placementRules(placement)
for _, p := range pieces {
if node, ok := state.reliableAll[p.StorageNode]; ok && !placement.AllowedCountry(node.CountryCode) {
if node, ok := state.reliableAll[p.StorageNode]; ok && !nodeFilters.MatchInclude(&node) {
outOfPlacementPieces = append(outOfPlacementPieces, p)
}
}
@ -189,24 +187,15 @@ func (cache *ReliabilityCache) refreshLocked(ctx context.Context) (_ *reliabilit
state := &reliabilityState{
created: time.Now(),
reliableOnline: make(map[storj.NodeID]reliableNode, len(online)),
reliableAll: make(map[storj.NodeID]reliableNode, len(online)+len(offline)),
reliableOnline: make(map[storj.NodeID]nodeselection.SelectedNode, len(online)),
reliableAll: make(map[storj.NodeID]nodeselection.SelectedNode, len(online)+len(offline)),
}
for _, node := range online {
state.reliableOnline[node.ID] = reliableNode{
LastNet: node.LastNet,
CountryCode: node.CountryCode,
}
state.reliableAll[node.ID] = reliableNode{
LastNet: node.LastNet,
CountryCode: node.CountryCode,
}
state.reliableOnline[node.ID] = node
state.reliableAll[node.ID] = node
}
for _, node := range offline {
state.reliableAll[node.ID] = reliableNode{
LastNet: node.LastNet,
CountryCode: node.CountryCode,
}
state.reliableAll[node.ID] = node
}
cache.state.Store(state)


@ -29,7 +29,7 @@ func TestReliabilityCache_Concurrent(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
overlayCache, err := overlay.NewService(zap.NewNop(), fakeOverlayDB{}, fakeNodeEvents{}, "", "", overlay.Config{
overlayCache, err := overlay.NewService(zap.NewNop(), fakeOverlayDB{}, fakeNodeEvents{}, overlay.NewPlacementRules().CreateFilters, "", "", overlay.Config{
NodeSelectionCache: overlay.UploadSelectionCacheConfig{
Staleness: 2 * time.Nanosecond,
},
@ -40,7 +40,7 @@ func TestReliabilityCache_Concurrent(t *testing.T) {
ctx.Go(func() error { return overlayCache.Run(cacheCtx) })
defer ctx.Check(overlayCache.Close)
cache := checker.NewReliabilityCache(overlayCache, time.Millisecond, []string{})
cache := checker.NewReliabilityCache(overlayCache, time.Millisecond, overlay.NewPlacementRules().CreateFilters, []string{})
var group errgroup.Group
for i := 0; i < 10; i++ {
group.Go(func() error {
@ -79,14 +79,16 @@ func TestReliabilityCache_OutOfPlacementPieces(t *testing.T) {
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
overlay := planet.Satellites[0].Overlay.Service
overlayService := planet.Satellites[0].Overlay.Service
config := planet.Satellites[0].Config.Checker
cache := checker.NewReliabilityCache(overlay, config.ReliabilityCacheStaleness, []string{})
rules := overlay.NewPlacementRules()
rules.AddLegacyStaticRules()
cache := checker.NewReliabilityCache(overlayService, config.ReliabilityCacheStaleness, rules.CreateFilters, []string{})
nodesPlacement := func(location location.CountryCode, nodes ...*testplanet.StorageNode) {
for _, node := range nodes {
err := overlay.TestNodeCountryCode(ctx, node.ID(), location.String())
err := overlayService.TestNodeCountryCode(ctx, node.ID(), location.String())
require.NoError(t, err)
}
require.NoError(t, cache.Refresh(ctx))


@ -102,6 +102,7 @@ type SegmentRepairer struct {
nowFn func() time.Time
OnTestingCheckSegmentAlteredHook func()
OnTestingPiecesReportHook func(pieces FetchResultReport)
placementRules overlay.PlacementRules
}
// NewSegmentRepairer creates a new instance of SegmentRepairer.
@ -116,6 +117,7 @@ func NewSegmentRepairer(
overlay *overlay.Service,
reporter audit.Reporter,
ecRepairer *ECRepairer,
placementRules overlay.PlacementRules,
repairOverrides checker.RepairOverrides,
config Config,
) *SegmentRepairer {
@ -139,6 +141,7 @@ func NewSegmentRepairer(
reputationUpdateEnabled: config.ReputationUpdateEnabled,
doDeclumping: config.DoDeclumping,
doPlacementCheck: config.DoPlacementCheck,
placementRules: placementRules,
nowFn: time.Now,
}
@ -705,9 +708,10 @@ func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segm
if repairer.doPlacementCheck && segment.Placement != storj.EveryCountry {
result.OutOfPlacementPiecesSet = map[uint16]bool{}
nodeFilters := repairer.placementRules(segment.Placement)
checkPlacement := func(reliable []nodeselection.SelectedNode) {
for _, node := range reliable {
if segment.Placement.AllowedCountry(node.CountryCode) {
if nodeFilters.MatchInclude(&node) {
continue
}


@ -141,7 +141,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
{ // setup overlay
var err error
peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, nodeEvents, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, nodeEvents, config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
@ -183,6 +183,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
// PUT and GET actions which are not used by
// repairer so we can set noop implementation.
orders.NewNoopDB(),
config.Placement.CreateFilters,
config.Orders,
)
if err != nil {
@ -217,6 +218,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
peer.Overlay,
peer.Audit.Reporter,
peer.EcRepairer,
config.Placement.CreateFilters,
config.Checker.RepairOverrides,
config.Repairer,
)


@ -892,6 +892,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# whether to enable piece tracker observer with ranged loop
# piece-tracker.use-ranged-loop: true
# detailed placement rules in the form 'id:definition;id:definition;...' where id is a 16 bytes integer (use >10 for backward compatibility), definition is a combination of the following functions:country(2 letter country codes,...), tag(nodeId, key, bytes(value)) all(...,...).
# placement: ""
# how often to remove unused project bandwidth rollups
# project-bw-cleanup.interval: 24h0m0s


@ -79,6 +79,7 @@ require (
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 // indirect
github.com/jtolds/tracetagger/v2 v2.0.0-rc5 // indirect
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d // indirect
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b // indirect
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 // indirect
github.com/klauspost/cpuid/v2 v2.0.12 // indirect
github.com/magefile/mage v1.13.0 // indirect


@ -456,6 +456,8 @@ github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b h1:HKvXTXZTeUHXRibg2ilZlkGSQP6A3cs0zXrBd4xMi6M=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b/go.mod h1:Mrym6OnPMkBKvN8/uXSkyhFSh6ndKKYE+Q4kxCfQ4V0=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=


@ -95,6 +95,7 @@ require (
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 // indirect
github.com/jtolds/tracetagger/v2 v2.0.0-rc5 // indirect
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d // indirect
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b // indirect
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 // indirect
github.com/klauspost/compress v1.15.10 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect


@ -694,6 +694,8 @@ github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b h1:HKvXTXZTeUHXRibg2ilZlkGSQP6A3cs0zXrBd4xMi6M=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b/go.mod h1:Mrym6OnPMkBKvN8/uXSkyhFSh6ndKKYE+Q4kxCfQ4V0=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=