satellite/{repair,rangedloop,overlay}: fix node tag placement selection for repair

This patch fixes node-tag-based placement selection for the rangedloop/repairchecker and the repair process.

The main change is adding node tags to the Reliable and KnownReliable database calls, plus new tests to prove that it works.

https://github.com/storj/storj/issues/6126

Change-Id: I245d654a18c1d61b2c72df49afa0718d0de76da1
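
For orientation, here is a rough, illustrative sketch of the tag-based placement flow the new tests exercise. It is not part of the commit and the helper does not exist in the codebase; it only reuses identifiers that appear in the diff below (overlay.ConfigurablePlacementRule with Set and CreateFilters, nodeselection.SelectedNode, MatchInclude), with exact signatures assumed from that context:

package placementsketch

import (
	"fmt"

	"storj.io/common/storj"
	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

// certifiedOnly configures placement 16 to require a signed "certified" tag and
// placement 0 to exclude it, mirroring the rule string used in TestNodeTagPlacement,
// then reports whether the given node is allowed for placement 16.
// Illustrative only; the helper name is made up for this sketch.
func certifiedOnly(signer storj.NodeID, node *nodeselection.SelectedNode) (bool, error) {
	rules := overlay.ConfigurablePlacementRule{}
	tag := fmt.Sprintf(`tag("%s", "certified","true")`, signer)
	if err := rules.Set(fmt.Sprintf(`0:exclude(%s);16:%s`, tag, tag)); err != nil {
		return false, err
	}

	// CreateFilters is the overlay.PlacementRules function that the repairer
	// and the tests below use to turn a placement constraint into a node filter.
	filter := rules.CreateFilters(storj.PlacementConstraint(16))
	return filter.MatchInclude(node), nil
}
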
Márton Elek 2023-08-15 12:42:42 +02:00 committed by Storj Robot
parent a3067b7b3b
commit de7aabc8c9
4 changed files with 285 additions and 52 deletions

View File

@@ -2403,13 +2403,14 @@ func TestListUploads(t *testing.T) {
})
}
func TestPlacements(t *testing.T) {
func TestNodeTagPlacement(t *testing.T) {
ctx := testcontext.New(t)
satelliteIdentity := signing.SignerFromFullIdentity(testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion()))
placementRules := overlay.ConfigurablePlacementRule{}
err := placementRules.Set(fmt.Sprintf(`16:tag("%s", "certified","true")`, satelliteIdentity.ID()))
tag := fmt.Sprintf(`tag("%s", "certified","true")`, satelliteIdentity.ID())
err := placementRules.Set(fmt.Sprintf(`0:exclude(%s);16:%s`, tag, tag))
require.NoError(t, err)
testplanet.Run(t,
@@ -2457,16 +2458,6 @@ func TestPlacements(t *testing.T) {
uplink := planet.Uplinks[0]
projectID := uplink.Projects[0].ID
// create buckets with different placement (placement 16 is configured above)
createGeofencedBucket(t, ctx, buckets, projectID, "constrained", 16)
objectNo := 10
for i := 0; i < objectNo; i++ {
// upload an object to one of the global buckets
err := uplink.Upload(ctx, satellite, "constrained", "testobject"+strconv.Itoa(i), make([]byte, 10240))
require.NoError(t, err)
}
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfoClient, err := uplink.DialMetainfo(ctx, satellite, apiKey)
require.NoError(t, err)
@@ -2474,28 +2465,55 @@ func TestPlacements(t *testing.T) {
_ = metainfoClient.Close()
}()
nodeIndex := map[storj.NodeID]int{}
for ix, node := range planet.StorageNodes {
nodeIndex[node.Identity.ID] = ix
}
testPlacement := func(bucketName string, placement int, allowedNodes func(int) bool) {
createGeofencedBucket(t, ctx, buckets, projectID, bucketName, storj.PlacementConstraint(placement))
objectNo := 10
for i := 0; i < objectNo; i++ {
err := uplink.Upload(ctx, satellite, bucketName, "testobject"+strconv.Itoa(i), make([]byte, 10240))
require.NoError(t, err)
}
objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
Bucket: []byte("constrained"),
Bucket: []byte(bucketName),
})
require.NoError(t, err)
require.Len(t, objects, objectNo)
for _, listedObject := range objects {
for i := 0; i < 5; i++ {
o, err := metainfoClient.DownloadObject(ctx, metaclient.DownloadObjectParams{
Bucket: []byte("constrained"),
Bucket: []byte(bucketName),
EncryptedObjectKey: listedObject.EncryptedObjectKey,
})
require.NoError(t, err)
for _, limit := range o.DownloadedSegments[0].Limits {
if limit != nil {
// starting from 2 (first identity used for satellite, SN with even index are fine)
for i := 2; i < 11; i += 2 {
require.NotEqual(t, testidentity.MustPregeneratedSignedIdentity(i, storj.LatestIDVersion()).ID, limit.Limit.StorageNodeId)
ix := nodeIndex[limit.Limit.StorageNodeId]
require.True(t, allowedNodes(ix))
}
}
}
}
}
t.Run("upload to constrained", func(t *testing.T) {
testPlacement("constrained", 16, func(i int) bool {
return i%2 == 0
})
})
t.Run("upload to generic excluding constrained", func(t *testing.T) {
testPlacement("generic", 0, func(i int) bool {
return i%2 == 1
})
})
},
)
}

View File

@@ -660,7 +660,6 @@ func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segm
defer mon.Task()(&ctx)(&err)
pieces := segment.Pieces
placement := segment.Placement
allNodeIDs := make([]storj.NodeID, len(pieces))
nodeIDPieceMap := map[storj.NodeID]uint16{}
@@ -678,11 +677,13 @@ func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segm
return piecesCheckResult{}, overlayQueryError.New("error identifying missing pieces: %w", err)
}
nodeFilters := repairer.placementRules(segment.Placement)
// remove online nodes from missing pieces
for _, onlineNode := range online {
// count online nodes in excluded countries only if country is not excluded by segment
// placement, those nodes will be counted with out of placement check
if _, excluded := repairer.excludedCountryCodes[onlineNode.CountryCode]; excluded && placement.AllowedCountry(onlineNode.CountryCode) {
if _, excluded := repairer.excludedCountryCodes[onlineNode.CountryCode]; excluded && nodeFilters.MatchInclude(&onlineNode) {
result.NumHealthyInExcludedCountries++
}
@@ -719,7 +720,7 @@ func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segm
result.OutOfPlacementPiecesSet = map[uint16]bool{}
nodeFilters := repairer.placementRules(segment.Placement)
nodeFilters = repairer.placementRules(segment.Placement)
checkPlacement := func(reliable []nodeselection.SelectedNode) {
for _, node := range reliable {
if nodeFilters.MatchInclude(&node) {

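To make the classifySegmentPieces change above concrete, the following is a minimal sketch of the placement check it performs, assuming the types visible in this diff (overlay.PlacementRules, nodeselection.SelectedNode). The helper name and package are invented for illustration; the nodes are expected to come from the overlay's Reliable/KnownReliable calls, which after this commit carry their tags so tag rules can actually match:

package placementsketch

import (
	"storj.io/common/storj"
	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

// outOfPlacement returns the IDs of reliable nodes that do not match the filter
// for the segment's placement. Before this commit, tag-based filters could not
// match because the nodes arrived without tags loaded. Illustrative only; not
// the actual repairer code.
func outOfPlacement(reliable []nodeselection.SelectedNode, placement storj.PlacementConstraint, rules overlay.PlacementRules) []storj.NodeID {
	filter := rules(placement)
	var out []storj.NodeID
	for _, node := range reliable {
		node := node // copy so taking its address is safe
		if !filter.MatchInclude(&node) {
			out = append(out, node.ID)
		}
	}
	return out
}
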
View File

@@ -5,15 +5,19 @@ package repairer_test
import (
"context"
"strconv"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/identity/testidentity"
"storj.io/common/memory"
"storj.io/common/nodetag"
"storj.io/common/pb"
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/common/testcontext"
@@ -22,8 +26,11 @@ import (
"storj.io/storj/satellite"
"storj.io/storj/satellite/buckets"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/repair/queue"
"storj.io/storj/storagenode"
"storj.io/storj/storagenode/contact"
)
func TestSegmentRepairPlacement(t *testing.T) {
@@ -58,7 +65,7 @@ func TestSegmentRepairPlacement(t *testing.T) {
piecesOutOfPlacementOffline int
}
for i, tc := range []testCase{
for _, tc := range []testCase{
// all pieces/nodes are out of placement, repair download/upload should be triggered
{piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount},
@@ -76,7 +83,8 @@ func TestSegmentRepairPlacement(t *testing.T) {
{piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1, piecesOutOfPlacementOffline: 1},
{piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1, piecesOutOfPlacementOffline: 1},
} {
t.Run("#"+strconv.Itoa(i), func(t *testing.T) {
t.Run(fmt.Sprintf("oop_%d_ar_%d_off_%d", tc.piecesOutOfPlacement, tc.piecesAfterRepair, tc.piecesOutOfPlacementOffline), func(t *testing.T) {
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), defaultLocation.String()))
}
@@ -102,7 +110,7 @@ func TestSegmentRepairPlacement(t *testing.T) {
}
// confirm that some pieces are out of placement
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement, planet.Satellites[0].Config.Placement.CreateFilters)
require.NoError(t, err)
require.False(t, ok)
@@ -121,7 +129,7 @@ func TestSegmentRepairPlacement(t *testing.T) {
require.NotNil(t, segments[0].RepairedAt)
require.Len(t, segments[0].Pieces, tc.piecesAfterRepair)
ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement, planet.Satellites[0].Config.Placement.CreateFilters)
require.NoError(t, err)
require.True(t, ok)
@@ -135,6 +143,150 @@ func TestSegmentRepairPlacement(t *testing.T) {
})
}
func TestSegmentRepairWithNodeTags(t *testing.T) {
satelliteIdentity := signing.SignerFromFullIdentity(testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion()))
ctx := testcontext.New(t)
testplanet.Run(t, testplanet.Config{
// we use 23 nodes:
// first 0-9: untagged
// next 10-19: tagged, used to upload (remaining should be offline during first upload)
// next 20-22: tagged, used to upload during repair (4 should be offline from the previous set: we will have 6 pieces + 3 new to these)
SatelliteCount: 1, StorageNodeCount: 23, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
func(log *zap.Logger, index int, config *satellite.Config) {
placementRules := overlay.ConfigurablePlacementRule{}
tag := fmt.Sprintf("tag(\"%s\",\"selected\",\"true\")", satelliteIdentity.ID())
err := placementRules.Set(fmt.Sprintf("0:exclude(%s);10:%s", tag, tag))
require.NoError(t, err)
config.Placement = placementRules
},
func(log *zap.Logger, index int, config *satellite.Config) {
config.Overlay.Node.AsOfSystemTime.Enabled = false
},
testplanet.ReconfigureRS(4, 6, 8, 10),
),
StorageNode: func(index int, config *storagenode.Config) {
if index >= 10 {
tags := &pb.NodeTagSet{
NodeId: testidentity.MustPregeneratedSignedIdentity(index+1, storj.LatestIDVersion()).ID.Bytes(),
SignedAt: time.Now().Unix(),
Tags: []*pb.Tag{
{
Name: "selected",
Value: []byte("true"),
},
},
}
signed, err := nodetag.Sign(ctx, tags, satelliteIdentity)
require.NoError(t, err)
config.Contact.Tags = contact.SignedTags(pb.SignedNodeTagSets{
Tags: []*pb.SignedNodeTagSet{
signed,
},
})
}
// make sure we control the checking requests, and they won't be updated
config.Contact.Interval = 60 * time.Minute
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
allTaggedNodes := []int{10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}
{
// create two buckets: one normal, one with placement=10
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "generic"))
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "selected"))
_, err := planet.Satellites[0].API.Buckets.Service.UpdateBucket(ctx, buckets.Bucket{
ProjectID: planet.Uplinks[0].Projects[0].ID,
Name: "selected",
Placement: 10,
})
require.NoError(t, err)
}
{
// these nodes will be used during the repair, let's make them offline to make sure, they don't have any pieces
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[20], true, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[21], true, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[22], true, location.Germany))
require.NoError(t, planet.Satellites[0].Overlay.Service.UploadSelectionCache.Refresh(ctx))
}
expectedData := testrand.Bytes(5 * memory.KiB)
{
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "selected", "object", expectedData)
require.NoError(t, err)
}
{
// check the right placement
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.Equal(t, storj.PlacementConstraint(10), segments[0].Placement)
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement, planet.Satellites[0].Config.Placement.CreateFilters)
require.NoError(t, err)
require.True(t, ok)
err = piecesOnNodeByIndex(ctx, planet, segments[0].Pieces, allTaggedNodes)
require.NoError(t, err)
}
{
// 4 offline nodes should trigger a new repair (6 pieces available)
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[16], true, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[17], true, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[18], true, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[19], true, location.Germany))
// we need 4 more online (tagged) nodes to repair, let's turn them on
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[20], false, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[21], false, location.Germany))
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.StorageNodes[22], false, location.Germany))
require.NoError(t, planet.Satellites[0].Repairer.Overlay.UploadSelectionCache.Refresh(ctx))
}
{
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
_, err = planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
StreamID: segments[0].StreamID,
Position: segments[0].Position,
})
require.NoError(t, err)
}
{
// check the right placement
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
err = piecesOnNodeByIndex(ctx, planet, segments[0].Pieces, allTaggedNodes)
require.NoError(t, err)
}
data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "selected", "object")
require.NoError(t, err)
require.Equal(t, expectedData, data)
})
}
func TestSegmentRepairPlacementAndClumped(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 8, UplinkCount: 1,
@@ -196,7 +348,7 @@ func TestSegmentRepairPlacementAndClumped(t *testing.T) {
}
// confirm that some pieces are out of placement
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement, planet.Satellites[0].Config.Placement.CreateFilters)
require.NoError(t, err)
require.False(t, ok)
@@ -215,7 +367,7 @@ func TestSegmentRepairPlacementAndClumped(t *testing.T) {
require.NotNil(t, segments[0].RepairedAt)
require.Len(t, segments[0].Pieces, 4)
ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement, planet.Satellites[0].Config.Placement.CreateFilters)
require.NoError(t, err)
require.True(t, ok)
})
@@ -321,13 +473,56 @@ func TestSegmentRepairPlacementAndExcludedCountries(t *testing.T) {
})
}
func allPiecesInPlacement(ctx context.Context, overaly *overlay.Service, pieces metabase.Pieces, placement storj.PlacementConstraint) (bool, error) {
func piecesOnNodeByIndex(ctx context.Context, planet *testplanet.Planet, pieces metabase.Pieces, allowedIndexes []int) error {
findIndex := func(id storj.NodeID) int {
for ix, storagenode := range planet.StorageNodes {
if storagenode.ID() == id {
return ix
}
}
return -1
}
intInSlice := func(allowedNumbers []int, num int) bool {
for _, n := range allowedNumbers {
if n == num {
return true
}
}
return false
}
for _, piece := range pieces {
ix := findIndex(piece.StorageNode)
if ix == -1 || !intInSlice(allowedIndexes, ix) {
return errs.New("piece is on storagenode (%s, %d) which is not whitelisted", piece.StorageNode, ix)
}
}
return nil
}
func allPiecesInPlacement(ctx context.Context, overaly *overlay.Service, pieces metabase.Pieces, placement storj.PlacementConstraint, rules overlay.PlacementRules) (bool, error) {
filter := rules(placement)
for _, piece := range pieces {
nodeDossier, err := overaly.Get(ctx, piece.StorageNode)
if err != nil {
return false, err
}
if !placement.AllowedCountry(nodeDossier.CountryCode) {
tags, err := overaly.GetNodeTags(ctx, piece.StorageNode)
if err != nil {
return false, err
}
node := &nodeselection.SelectedNode{
ID: nodeDossier.Id,
CountryCode: nodeDossier.CountryCode,
Tags: tags,
}
if !filter.MatchInclude(node) {
return false, nil
}
}
@@ -343,6 +538,7 @@ func updateNodeStatus(ctx context.Context, satellite *testplanet.Satellite, node
return satellite.DB.OverlayCache().UpdateCheckIn(ctx, overlay.NodeCheckInInfo{
NodeID: node.ID(),
Address: &pb.NodeAddress{Address: node.Addr()},
LastNet: node.Addr(),
IsUp: true,
Version: &pb.NodeVersion{
Version: "v0.0.0",

View File

@@ -395,9 +395,11 @@ func (cache *overlaycache) UpdateLastOfflineEmail(ctx context.Context, nodeIDs s
}
// KnownReliable filters a set of nodes to reliable nodes. List is split into online and offline nodes.
func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) ([]nodeselection.SelectedNode, []nodeselection.SelectedNode, error) {
var on, off []*nodeselection.SelectedNode
var err error
for {
online, offline, err = cache.knownReliable(ctx, nodeIDs, onlineWindow, asOfSystemInterval)
on, off, err = cache.knownReliable(ctx, nodeIDs, onlineWindow, asOfSystemInterval)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
@@ -406,11 +408,18 @@ func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.Node
}
break
}
return online, offline, err
err = cache.addNodeTags(ctx, append(on, off...))
deref := func(nodes []*nodeselection.SelectedNode) []nodeselection.SelectedNode {
var res []nodeselection.SelectedNode
for _, node := range nodes {
res = append(res, *node)
}
return res
}
return deref(on), deref(off), err
}
func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []*nodeselection.SelectedNode, offline []*nodeselection.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodeIDs) == 0 {
@@ -435,9 +444,9 @@ func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.Node
}
if node.Online {
online = append(online, node)
online = append(online, &node)
} else {
offline = append(offline, node)
offline = append(offline, &node)
}
}
return nil
@@ -447,9 +456,11 @@ func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.Node
}
// Reliable returns all nodes that are reliable, online and offline.
func (cache *overlaycache) Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
func (cache *overlaycache) Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) ([]nodeselection.SelectedNode, []nodeselection.SelectedNode, error) {
var on, off []*nodeselection.SelectedNode
var err error
for {
online, offline, err = cache.reliable(ctx, onlineWindow, asOfSystemInterval)
on, off, err = cache.reliable(ctx, onlineWindow, asOfSystemInterval)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
@@ -458,11 +469,18 @@ func (cache *overlaycache) Reliable(ctx context.Context, onlineWindow, asOfSyste
}
break
}
return online, offline, nil
err = cache.addNodeTags(ctx, append(on, off...))
deref := func(nodes []*nodeselection.SelectedNode) []nodeselection.SelectedNode {
var res []nodeselection.SelectedNode
for _, node := range nodes {
res = append(res, *node)
}
return res
}
return deref(on), deref(off), err
}
func (cache *overlaycache) reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
func (cache *overlaycache) reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []*nodeselection.SelectedNode, offline []*nodeselection.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
err = withRows(cache.db.Query(ctx, `
@@ -482,9 +500,9 @@ func (cache *overlaycache) reliable(ctx context.Context, onlineWindow, asOfSyste
}
if node.Online {
online = append(online, node)
online = append(online, &node)
} else {
offline = append(offline, node)
offline = append(offline, &node)
}
}
return nil