// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package checker

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/common/identity/testidentity"
	"storj.io/common/storj"
	"storj.io/common/storj/location"
	"storj.io/common/testcontext"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metabase/rangedloop"
	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/repair/queue"
)
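// TestObserverForkProcess exercises observerFork.process directly, feeding
// hand-built segments into a fork backed by an in-memory mock repair queue,
// so no full satellite is needed.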
func TestObserverForkProcess(t *testing.T) {
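	// Fixture: ten online nodes, all in Germany and all reporting the same
	// LastNet ("127.0.0.0"), so any segment placed on them looks clumped to
	// the declumping check.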
	nodes := func() (res []nodeselection.SelectedNode) {
		for i := 0; i < 10; i++ {
			res = append(res, nodeselection.SelectedNode{
				ID:          testidentity.MustPregeneratedIdentity(i, storj.LatestIDVersion()).ID,
				Online:      true,
				CountryCode: location.Germany,
				LastNet:     "127.0.0.0",
			})
		}
		return res
	}()
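
	// mapNodes builds a lookup keyed by node ID, keeping only the nodes
	// accepted by the include predicate.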
	mapNodes := func(nodes []nodeselection.SelectedNode, include func(node nodeselection.SelectedNode) bool) map[storj.NodeID]nodeselection.SelectedNode {
		res := map[storj.NodeID]nodeselection.SelectedNode{}
		for _, node := range nodes {
			if include(node) {
				res[node.ID] = node
			}
		}
		return res
	}

	ctx := testcontext.New(t)
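
	// createDefaultObserver wires up an Observer whose reliability cache is
	// pre-seeded with the fixture nodes and default placement rules, so
	// process can run without a database-backed overlay.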
	createDefaultObserver := func() *Observer {
		o := &Observer{
			statsCollector: make(map[string]*observerRSStats),
			nodesCache: &ReliabilityCache{
				staleness:      time.Hour,
				placementRules: overlay.NewPlacementDefinitions().CreateFilters,
			},
		}

		o.nodesCache.state.Store(&reliabilityState{
			nodeByID: mapNodes(nodes, func(node nodeselection.SelectedNode) bool {
				return true
			}),
			created: time.Now(),
		})
		return o
	}
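
	// createFork builds an observerFork that reuses the observer's flags,
	// caches, and stats callbacks, buffering repair candidates into the
	// supplied queue through an insert buffer.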
	createFork := func(o *Observer, q queue.RepairQueue) *observerFork {
		return &observerFork{
			log:              zaptest.NewLogger(t),
			getObserverStats: o.getObserverStats,
			rsStats:          make(map[string]*partialRSStats),
			doDeclumping:     o.doDeclumping,
			doPlacementCheck: o.doPlacementCheck,
			getNodesEstimate: o.getNodesEstimate,
			nodesCache:       o.nodesCache,
			repairQueue:      queue.NewInsertBuffer(q, 1000),
		}
	}
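
	// createPieces assigns piece number ix to nodes[selected[ix]], so each
	// subtest can choose exactly which fixture nodes hold the segment.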
	createPieces := func(nodes []nodeselection.SelectedNode, selected ...int) metabase.Pieces {
		pieces := make(metabase.Pieces, len(selected))
		for ix, s := range selected {
			pieces[ix] = metabase.Piece{
				Number:      uint16(ix),
				StorageNode: nodes[s].ID,
			}
		}
		return pieces
	}
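
	// All ten pieces sit on distinct online nodes and neither declumping nor
	// placement checks are enabled, so nothing should be queued for repair.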
	t.Run("all healthy", func(t *testing.T) {
		o := createDefaultObserver()
		q := queue.MockRepairQueue{}
		fork := createFork(o, &q)
		err := fork.process(ctx, &rangedloop.Segment{
			Pieces: createPieces(nodes, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
			Redundancy: storj.RedundancyScheme{
				Algorithm:      storj.ReedSolomon,
				ShareSize:      256,
				RepairShares:   4,
				RequiredShares: 6,
				OptimalShares:  8,
				TotalShares:    10,
			},
		})
		require.NoError(t, err)

		err = fork.repairQueue.Flush(ctx)
		require.NoError(t, err)

		require.Len(t, q.Segments, 0)
	})
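
	// Same segment, but with declumping enabled: all pieces share one subnet,
	// so the segment counts as clumped and must be queued for repair.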
	t.Run("declumping", func(t *testing.T) {
		o := createDefaultObserver()
		o.doDeclumping = true
		q := queue.MockRepairQueue{}
		fork := createFork(o, &q)
		err := fork.process(ctx, &rangedloop.Segment{
			Pieces: createPieces(nodes, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
			Redundancy: storj.RedundancyScheme{
				Algorithm:      storj.ReedSolomon,
				ShareSize:      256,
				RepairShares:   4,
				RequiredShares: 6,
				OptimalShares:  8,
				TotalShares:    10,
			},
		})
		require.NoError(t, err)

		err = fork.repairQueue.Flush(ctx)
		require.NoError(t, err)

		// as all test nodes are in the same subnet...
		require.Len(t, q.Segments, 1)
	})
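
	// The third subtest uses a placement whose annotation turns the subnet
	// check off, so declumping must not trigger even though it is enabled.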
	t.Run("declumping is ignored by annotation", func(t *testing.T) {
		o := createDefaultObserver()
		o.doDeclumping = true

		placements := overlay.ConfigurablePlacementRule{}
		require.NoError(t, placements.Set(fmt.Sprintf(`10:annotated(country("DE"),annotation("%s","%s"))`, nodeselection.AutoExcludeSubnet, nodeselection.AutoExcludeSubnetOFF)))
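
		// Install the parsed rules on the nodes cache so that placement 10
		// resolves to the annotated selector defined above.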
		parsed, err := placements.Parse()
		require.NoError(t, err)
		o.nodesCache.placementRules = parsed.CreateFilters

		q := queue.MockRepairQueue{}
		fork := createFork(o, &q)
		err = fork.process(ctx, &rangedloop.Segment{
			Placement: 10,
			Pieces:    createPieces(nodes, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
			Redundancy: storj.RedundancyScheme{
				Algorithm:      storj.ReedSolomon,
				ShareSize:      256,
				RepairShares:   4,
				RequiredShares: 6,
				OptimalShares:  8,
				TotalShares:    10,
			},
		})
		require.NoError(t, err)

		err = fork.repairQueue.Flush(ctx)
		require.NoError(t, err)

		require.Len(t, q.Segments, 0)
	})
}