satellite: move satellite/nodeselection/uploadselection => satellite/nodeselection
All the files in uploadselection are, in fact, related to generic node selection and are used not only for upload but also for download, repair, and so on.

Change-Id: Ie4098318a6f8f0bbf672d432761e87047d3762ab
parent 8b4387a498
commit 70cdca5d3c
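For code that imports the package, the rename is mechanical: the import path drops its trailing uploadselection segment and the package qualifier becomes nodeselection, while the exported API (SelectedNode, NodeFilters, SelectByID, and so on) keeps its names. The sketch below is not part of this commit; it is a minimal illustration that assumes the selector behaves as the tests in the hunks that follow exercise it (testrand is normally a test-only helper).

package main

import (
    "fmt"

    "storj.io/common/testrand"
    "storj.io/storj/satellite/nodeselection" // was storj.io/storj/satellite/nodeselection/uploadselection
)

func main() {
    // Two candidate nodes; ID, LastNet and LastIPPort are the fields the
    // selection tests in this diff populate.
    nodes := []*nodeselection.SelectedNode{
        {ID: testrand.NodeID(), LastNet: "1.0.1", LastIPPort: "1.0.1.4:8080"},
        {ID: testrand.NodeID(), LastNet: "1.0.2", LastIPPort: "1.0.2.5:8080"},
    }

    // Only the package qualifier changes: uploadselection.SelectByID and
    // uploadselection.NodeFilters become nodeselection.SelectByID and
    // nodeselection.NodeFilters.
    selector := nodeselection.SelectByID(nodes)
    selected := selector.Select(1, nodeselection.NodeFilters{})
    fmt.Println("selected", len(selected), "node(s)")
}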
@@ -40,7 +40,7 @@ import (
"storj.io/storj/satellite/accounting/live"
"storj.io/storj/satellite/compensation"
"storj.io/storj/satellite/metabase"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/payments/stripe"
"storj.io/storj/satellite/satellitedb"
)
@@ -932,7 +932,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
successes := new(int64)
failures := new(int64)

- undelete := func(node *uploadselection.SelectedNode) {
+ undelete := func(node *nodeselection.SelectedNode) {
log.Info("starting restore trash", zap.String("Node ID", node.ID.String()))

ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
@@ -966,9 +966,9 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
log.Info("successful restore trash", zap.String("Node ID", node.ID.String()))
}

- var nodes []*uploadselection.SelectedNode
+ var nodes []*nodeselection.SelectedNode
if len(args) == 0 {
- err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *uploadselection.SelectedNode) error {
+ err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *nodeselection.SelectedNode) error {
nodes = append(nodes, node)
return nil
})
@@ -985,7 +985,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- nodes = append(nodes, &uploadselection.SelectedNode{
+ nodes = append(nodes, &nodeselection.SelectedNode{
ID: dossier.Id,
Address: dossier.Address,
LastNet: dossier.LastNet,
@@ -15,7 +15,7 @@ import (
"storj.io/common/uuid"
"storj.io/private/process"
"storj.io/storj/satellite/metabase"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb"
)
@@ -79,7 +79,7 @@ type NodeCheckConfig struct {

// NodeCheckOverlayDB contains dependencies from overlay that are needed for the processing.
type NodeCheckOverlayDB interface {
- IterateAllContactedNodes(context.Context, func(context.Context, *uploadselection.SelectedNode) error) error
+ IterateAllContactedNodes(context.Context, func(context.Context, *nodeselection.SelectedNode) error) error
IterateAllNodeDossiers(context.Context, func(context.Context, *overlay.NodeDossier) error) error
}

@@ -21,7 +21,7 @@ import (
"storj.io/common/uuid"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/metabase"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
)

@@ -47,7 +47,7 @@ type Verifier interface {
type Overlay interface {
// Get looks up the node by nodeID
Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error)
- SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*uploadselection.SelectedNode, error)
+ SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
}

// SegmentWriter allows writing segments to some output.
@@ -23,7 +23,7 @@ import (
segmentverify "storj.io/storj/cmd/tools/segment-verify"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metabase"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
)

@@ -345,10 +345,10 @@ func (db *metabaseMock) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.
}, nil
}

- func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*uploadselection.SelectedNode, error) {
+ func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error) {
- var xs []*uploadselection.SelectedNode
+ var xs []*nodeselection.SelectedNode
for nodeID := range db.nodeIDToAlias {
- xs = append(xs, &uploadselection.SelectedNode{
+ xs = append(xs, &nodeselection.SelectedNode{
ID: nodeID,
Address: &pb.NodeAddress{
Address: fmt.Sprintf("nodeid:%v", nodeID),
@@ -18,7 +18,7 @@ import (
"storj.io/common/rpc/quic"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/storj"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
)

@@ -159,7 +159,7 @@ func (service *Service) pingNodeQUIC(ctx context.Context, nodeurl storj.NodeURL)

func (service *Service) processNodeTags(ctx context.Context, nodeID storj.NodeID, req *pb.SignedNodeTagSets) error {
if req != nil {
- tags := uploadselection.NodeTags{}
+ tags := nodeselection.NodeTags{}
for _, t := range req.Tags {
verifiedTags, signerID, err := verifyTags(ctx, service.nodeTagAuthority, nodeID, t)
if err != nil {
@@ -169,7 +169,7 @@ func (service *Service) processNodeTags(ctx context.Context, nodeID storj.NodeID

ts := time.Unix(verifiedTags.Timestamp, 0)
for _, vt := range verifiedTags.Tags {
- tags = append(tags, uploadselection.NodeTag{
+ tags = append(tags, nodeselection.NodeTag{
NodeID: nodeID,
Name: vt.Name,
Value: vt.Value,
@@ -2,7 +2,7 @@
// See LICENSE for copying information.

// Package uploadselection implements node selection logic for uploads.
- package uploadselection
+ package nodeselection

import (
"github.com/spacemonkeygo/monkit/v3"
@@ -1,7 +1,7 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection
+ package nodeselection

import (
"bytes"
@@ -1,7 +1,7 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection
+ package nodeselection

import (
"fmt"
@@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection
+ package nodeselection

import (
"time"
@@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection
+ package nodeselection

import (
mathrand "math/rand" // Using mathrand here because crypto-graphic randomness is not required and simplifies code.
@@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection_test
+ package nodeselection_test

import (
"testing"
@@ -12,7 +12,7 @@ import (
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
)

func TestSelectByID(t *testing.T) {
@@ -24,26 +24,26 @@ func TestSelectByID(t *testing.T) {

// create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1"
- subnetA1 := &uploadselection.SelectedNode{
+ subnetA1 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080",
}
- subnetA2 := &uploadselection.SelectedNode{
+ subnetA2 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080",
}

lastNetSingle := "1.0.2"
- subnetB1 := &uploadselection.SelectedNode{
+ subnetB1 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080",
}

- nodes := []*uploadselection.SelectedNode{subnetA1, subnetA2, subnetB1}
+ nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
- selector := uploadselection.SelectByID(nodes)
+ selector := nodeselection.SelectByID(nodes)

const (
reqCount = 2
@@ -54,7 +54,7 @@ func TestSelectByID(t *testing.T) {

// perform many node selections that selects 2 nodes
for i := 0; i < executionCount; i++ {
- selectedNodes := selector.Select(reqCount, uploadselection.NodeFilters{})
+ selectedNodes := selector.Select(reqCount, nodeselection.NodeFilters{})
require.Len(t, selectedNodes, reqCount)
for _, node := range selectedNodes {
selectedNodeCount[node.ID]++
@@ -84,26 +84,26 @@ func TestSelectBySubnet(t *testing.T) {

// create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1"
- subnetA1 := &uploadselection.SelectedNode{
+ subnetA1 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080",
}
- subnetA2 := &uploadselection.SelectedNode{
+ subnetA2 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080",
}

lastNetSingle := "1.0.2"
- subnetB1 := &uploadselection.SelectedNode{
+ subnetB1 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080",
}

- nodes := []*uploadselection.SelectedNode{subnetA1, subnetA2, subnetB1}
+ nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
- selector := uploadselection.SelectBySubnetFromNodes(nodes)
+ selector := nodeselection.SelectBySubnetFromNodes(nodes)

const (
reqCount = 2
@@ -114,7 +114,7 @@ func TestSelectBySubnet(t *testing.T) {

// perform many node selections that selects 2 nodes
for i := 0; i < executionCount; i++ {
- selectedNodes := selector.Select(reqCount, uploadselection.NodeFilters{})
+ selectedNodes := selector.Select(reqCount, nodeselection.NodeFilters{})
require.Len(t, selectedNodes, reqCount)
for _, node := range selectedNodes {
selectedNodeCount[node.ID]++
@@ -156,26 +156,26 @@ func TestSelectBySubnetOneAtATime(t *testing.T) {

// create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1"
- subnetA1 := &uploadselection.SelectedNode{
+ subnetA1 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080",
}
- subnetA2 := &uploadselection.SelectedNode{
+ subnetA2 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080",
}

lastNetSingle := "1.0.2"
- subnetB1 := &uploadselection.SelectedNode{
+ subnetB1 := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080",
}

- nodes := []*uploadselection.SelectedNode{subnetA1, subnetA2, subnetB1}
+ nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
- selector := uploadselection.SelectBySubnetFromNodes(nodes)
+ selector := nodeselection.SelectBySubnetFromNodes(nodes)

const (
reqCount = 1
@@ -186,7 +186,7 @@ func TestSelectBySubnetOneAtATime(t *testing.T) {

// perform many node selections that selects 1 node
for i := 0; i < executionCount; i++ {
- selectedNodes := selector.Select(reqCount, uploadselection.NodeFilters{})
+ selectedNodes := selector.Select(reqCount, nodeselection.NodeFilters{})
require.Len(t, selectedNodes, reqCount)
for _, node := range selectedNodes {
selectedNodeCount[node.ID]++
@@ -220,14 +220,14 @@ func TestSelectFiltered(t *testing.T) {
// create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1"
firstID := testrand.NodeID()
- subnetA1 := &uploadselection.SelectedNode{
+ subnetA1 := &nodeselection.SelectedNode{
ID: firstID,
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080",
}

secondID := testrand.NodeID()
- subnetA2 := &uploadselection.SelectedNode{
+ subnetA2 := &nodeselection.SelectedNode{
ID: secondID,
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080",
@@ -235,20 +235,20 @@ func TestSelectFiltered(t *testing.T) {

thirdID := testrand.NodeID()
lastNetSingle := "1.0.2"
- subnetB1 := &uploadselection.SelectedNode{
+ subnetB1 := &nodeselection.SelectedNode{
ID: thirdID,
LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080",
}

- nodes := []*uploadselection.SelectedNode{subnetA1, subnetA2, subnetB1}
+ nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
- selector := uploadselection.SelectByID(nodes)
+ selector := nodeselection.SelectByID(nodes)

- assert.Len(t, selector.Select(3, uploadselection.NodeFilters{}), 3)
+ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}), 3)
- assert.Len(t, selector.Select(3, uploadselection.NodeFilters{}.WithAutoExcludeSubnets()), 2)
+ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithAutoExcludeSubnets()), 2)
- assert.Len(t, selector.Select(3, uploadselection.NodeFilters{}), 3)
+ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}), 3)

- assert.Len(t, selector.Select(3, uploadselection.NodeFilters{}.WithExcludedIDs([]storj.NodeID{firstID, secondID})), 1)
+ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithExcludedIDs([]storj.NodeID{firstID, secondID})), 1)
- assert.Len(t, selector.Select(3, uploadselection.NodeFilters{}.WithAutoExcludeSubnets()), 2)
+ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithAutoExcludeSubnets()), 2)
- assert.Len(t, selector.Select(3, uploadselection.NodeFilters{}.WithExcludedIDs([]storj.NodeID{thirdID}).WithAutoExcludeSubnets()), 1)
+ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithExcludedIDs([]storj.NodeID{thirdID}).WithAutoExcludeSubnets()), 1)
}
@@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection
+ package nodeselection

import (
"context"
@@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

- package uploadselection_test
+ package nodeselection_test

import (
"strconv"
@@ -12,7 +12,7 @@ import (

"storj.io/common/testcontext"
"storj.io/common/testrand"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
)

func TestState_SelectNonDistinct(t *testing.T) {
@@ -28,15 +28,15 @@ func TestState_SelectNonDistinct(t *testing.T) {
createRandomNodes(3, "1.0.4", false),
)

- state := uploadselection.NewState(reputableNodes, newNodes)
+ state := nodeselection.NewState(reputableNodes, newNodes)
- require.Equal(t, uploadselection.Stats{
+ require.Equal(t, nodeselection.Stats{
New: 5,
Reputable: 5,
}, state.Stats())

{ // select 5 non-distinct subnet reputable nodes
const selectCount = 5
- selected, err := state.Select(ctx, uploadselection.Request{
+ selected, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: 0,
})
@@ -47,7 +47,7 @@ func TestState_SelectNonDistinct(t *testing.T) {
{ // select 6 non-distinct subnet reputable and new nodes (50%)
const selectCount = 6
const newFraction = 0.5
- selected, err := state.Select(ctx, uploadselection.Request{
+ selected, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: newFraction,
})
@@ -60,7 +60,7 @@ func TestState_SelectNonDistinct(t *testing.T) {
{ // select 10 distinct subnet reputable and new nodes (100%), falling back to 5 reputable
const selectCount = 10
const newFraction = 1.0
- selected, err := state.Select(ctx, uploadselection.Request{
+ selected, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: newFraction,
})
@@ -84,15 +84,15 @@ func TestState_SelectDistinct(t *testing.T) {
createRandomNodes(3, "1.0.4", true),
)

- state := uploadselection.NewState(reputableNodes, newNodes)
+ state := nodeselection.NewState(reputableNodes, newNodes)
- require.Equal(t, uploadselection.Stats{
+ require.Equal(t, nodeselection.Stats{
New: 2,
Reputable: 2,
}, state.Stats())

{ // select 2 distinct subnet reputable nodes
const selectCount = 2
- selected, err := state.Select(ctx, uploadselection.Request{
+ selected, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: 0,
})
@@ -102,7 +102,7 @@ func TestState_SelectDistinct(t *testing.T) {

{ // try to select 5 distinct subnet reputable nodes, but there are only two 2 in the state
const selectCount = 5
- selected, err := state.Select(ctx, uploadselection.Request{
+ selected, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: 0,
})
@@ -113,7 +113,7 @@ func TestState_SelectDistinct(t *testing.T) {
{ // select 4 distinct subnet reputable and new nodes (50%)
const selectCount = 4
const newFraction = 0.5
- selected, err := state.Select(ctx, uploadselection.Request{
+ selected, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: newFraction,
})
@@ -137,12 +137,12 @@ func TestState_Select_Concurrent(t *testing.T) {
createRandomNodes(3, "1.0.4", false),
)

- state := uploadselection.NewState(reputableNodes, newNodes)
+ state := nodeselection.NewState(reputableNodes, newNodes)

var group errgroup.Group
group.Go(func() error {
const selectCount = 5
- nodes, err := state.Select(ctx, uploadselection.Request{
+ nodes, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: 0.5,
})
@@ -152,7 +152,7 @@ func TestState_Select_Concurrent(t *testing.T) {

group.Go(func() error {
const selectCount = 4
- nodes, err := state.Select(ctx, uploadselection.Request{
+ nodes, err := state.Select(ctx, nodeselection.Request{
Count: selectCount,
NewFraction: 0.5,
})
@@ -163,11 +163,11 @@ func TestState_Select_Concurrent(t *testing.T) {
}

// createRandomNodes creates n random nodes all in the subnet.
- func createRandomNodes(n int, subnet string, shareNets bool) []*uploadselection.SelectedNode {
+ func createRandomNodes(n int, subnet string, shareNets bool) []*nodeselection.SelectedNode {
- xs := make([]*uploadselection.SelectedNode, n)
+ xs := make([]*nodeselection.SelectedNode, n)
for i := range xs {
addr := subnet + "." + strconv.Itoa(i) + ":8080"
- xs[i] = &uploadselection.SelectedNode{
+ xs[i] = &nodeselection.SelectedNode{
ID: testrand.NodeID(),
LastNet: addr,
LastIPPort: addr,
@@ -182,8 +182,8 @@ func createRandomNodes(n int, subnet string, shareNets bool) []*uploadselection.
}

// joinNodes appends all slices into a single slice.
- func joinNodes(lists ...[]*uploadselection.SelectedNode) []*uploadselection.SelectedNode {
+ func joinNodes(lists ...[]*nodeselection.SelectedNode) []*nodeselection.SelectedNode {
- xs := []*uploadselection.SelectedNode{}
+ xs := []*nodeselection.SelectedNode{}
for _, list := range lists {
xs = append(xs, list...)
}
@@ -191,8 +191,8 @@ func joinNodes(lists ...[]*uploadselection.SelectedNode) []*uploadselection.Sele
}

// intersectLists returns nodes that exist in both lists compared by ID.
- func intersectLists(as, bs []*uploadselection.SelectedNode) []*uploadselection.SelectedNode {
+ func intersectLists(as, bs []*nodeselection.SelectedNode) []*nodeselection.SelectedNode {
- var xs []*uploadselection.SelectedNode
+ var xs []*nodeselection.SelectedNode

next:
for _, a := range as {
@@ -11,7 +11,7 @@ import (
gomock "github.com/golang/mock/gomock"

storj "storj.io/common/storj"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
overlay "storj.io/storj/satellite/overlay"
)

@@ -39,10 +39,10 @@ func (m *MockOverlayForOrders) EXPECT() *MockOverlayForOrdersMockRecorder {
}

// CachedGetOnlineNodesForGet mocks base method.
- func (m *MockOverlayForOrders) CachedGetOnlineNodesForGet(arg0 context.Context, arg1 []storj.NodeID) (map[storj.NodeID]*uploadselection.SelectedNode, error) {
+ func (m *MockOverlayForOrders) CachedGetOnlineNodesForGet(arg0 context.Context, arg1 []storj.NodeID) (map[storj.NodeID]*nodeselection.SelectedNode, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CachedGetOnlineNodesForGet", arg0, arg1)
- ret0, _ := ret[0].(map[storj.NodeID]*uploadselection.SelectedNode)
+ ret0, _ := ret[0].(map[storj.NodeID]*nodeselection.SelectedNode)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -18,7 +18,7 @@ import (
"storj.io/common/storj"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metabase"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
)

@@ -44,7 +44,7 @@ type Config struct {
//
//go:generate mockgen -destination mock_test.go -package orders . OverlayForOrders
type Overlay interface {
- CachedGetOnlineNodesForGet(context.Context, []storj.NodeID) (map[storj.NodeID]*uploadselection.SelectedNode, error)
+ CachedGetOnlineNodesForGet(context.Context, []storj.NodeID) (map[storj.NodeID]*nodeselection.SelectedNode, error)
GetOnlineNodesForAuditRepair(context.Context, []storj.NodeID) (map[storj.NodeID]*overlay.NodeReputation, error)
Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error)
IsOnline(node *overlay.NodeDossier) bool
@@ -236,7 +236,7 @@ func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID sto
}

// CreatePutOrderLimits creates the order limits for uploading pieces to nodes.
- func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabase.BucketLocation, nodes []*uploadselection.SelectedNode, pieceExpiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
+ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabase.BucketLocation, nodes []*nodeselection.SelectedNode, pieceExpiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)

signer, err := NewSignerPut(service, pieceExpiration, time.Now(), maxPieceSize, bucket)
@@ -255,7 +255,7 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabas
}

// ReplacePutOrderLimits replaces order limits for uploading pieces to nodes.
- func (service *Service) ReplacePutOrderLimits(ctx context.Context, rootPieceID storj.PieceID, addressedLimits []*pb.AddressedOrderLimit, nodes []*uploadselection.SelectedNode, pieceNumbers []int32) (_ []*pb.AddressedOrderLimit, err error) {
+ func (service *Service) ReplacePutOrderLimits(ctx context.Context, rootPieceID storj.PieceID, addressedLimits []*pb.AddressedOrderLimit, nodes []*nodeselection.SelectedNode, pieceNumbers []int32) (_ []*pb.AddressedOrderLimit, err error) {
defer mon.Task()(&ctx)(&err)

pieceIDDeriver := rootPieceID.Deriver()
@@ -458,7 +458,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, segment
}

// CreatePutRepairOrderLimits creates the order limits for uploading the repaired pieces of segment to newNodes.
- func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, segment metabase.Segment, getOrderLimits []*pb.AddressedOrderLimit, healthySet map[int32]struct{}, newNodes []*uploadselection.SelectedNode, optimalThresholdMultiplier float64, numPiecesInExcludedCountries int) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
+ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, segment metabase.Segment, getOrderLimits []*pb.AddressedOrderLimit, healthySet map[int32]struct{}, newNodes []*nodeselection.SelectedNode, optimalThresholdMultiplier float64, numPiecesInExcludedCountries int) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)

// Create the order limits for being used to upload the repaired pieces
@@ -591,7 +591,7 @@ func (service *Service) DecryptOrderMetadata(ctx context.Context, order *pb.Orde
return key.DecryptMetadata(order.SerialNumber, order.EncryptedMetadata)
}

- func resolveStorageNode_Selected(node *uploadselection.SelectedNode, resolveDNS bool) *pb.Node {
+ func resolveStorageNode_Selected(node *nodeselection.SelectedNode, resolveDNS bool) *pb.Node {
return resolveStorageNode(&pb.Node{
Id: node.ID,
Address: node.Address,
@@ -19,7 +19,7 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/satellite/metabase"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/orders"
)

@@ -30,10 +30,10 @@ func TestGetOrderLimits(t *testing.T) {
bucket := metabase.BucketLocation{ProjectID: testrand.UUID(), BucketName: "bucket1"}

pieces := metabase.Pieces{}
- nodes := map[storj.NodeID]*uploadselection.SelectedNode{}
+ nodes := map[storj.NodeID]*nodeselection.SelectedNode{}
for i := 0; i < 8; i++ {
nodeID := testrand.NodeID()
- nodes[nodeID] = &uploadselection.SelectedNode{
+ nodes[nodeID] = &nodeselection.SelectedNode{
ID: nodeID,
Address: &pb.NodeAddress{
Address: fmt.Sprintf("host%d.com", i),
@@ -11,7 +11,7 @@ import (

"storj.io/common/storj"
"storj.io/common/sync2"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
)

// DownloadSelectionDB implements the database for download selection cache.
@@ -19,7 +19,7 @@ import (
// architecture: Database
type DownloadSelectionDB interface {
// SelectAllStorageNodesDownload returns nodes that are ready for downloading
- SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*uploadselection.SelectedNode, error)
+ SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
}

// DownloadSelectionCacheConfig contains configuration for the selection cache.
@@ -89,7 +89,7 @@ func (cache *DownloadSelectionCache) GetNodeIPsFromPlacement(ctx context.Context
}

// GetNodes gets nodes by ID from the cache, and refreshes the cache if it is stale.
- func (cache *DownloadSelectionCache) GetNodes(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]*uploadselection.SelectedNode, err error) {
+ func (cache *DownloadSelectionCache) GetNodes(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]*nodeselection.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)

state, err := cache.cache.Get(ctx, time.Now())
@@ -111,12 +111,12 @@ func (cache *DownloadSelectionCache) Size(ctx context.Context) (int, error) {
// DownloadSelectionCacheState contains state of download selection cache.
type DownloadSelectionCacheState struct {
// byID returns IP based on storj.NodeID
- byID map[storj.NodeID]*uploadselection.SelectedNode // TODO: optimize, avoid pointery structures for performance
+ byID map[storj.NodeID]*nodeselection.SelectedNode // TODO: optimize, avoid pointery structures for performance
}

// NewDownloadSelectionCacheState creates a new state from the nodes.
- func NewDownloadSelectionCacheState(nodes []*uploadselection.SelectedNode) *DownloadSelectionCacheState {
+ func NewDownloadSelectionCacheState(nodes []*nodeselection.SelectedNode) *DownloadSelectionCacheState {
- byID := map[storj.NodeID]*uploadselection.SelectedNode{}
+ byID := map[storj.NodeID]*nodeselection.SelectedNode{}
for _, n := range nodes {
byID[n.ID] = n
}
@@ -153,8 +153,8 @@ func (state *DownloadSelectionCacheState) IPsFromPlacement(nodes []storj.NodeID,
}

// Nodes returns node ip:port for nodes that are in state.
- func (state *DownloadSelectionCacheState) Nodes(nodes []storj.NodeID) map[storj.NodeID]*uploadselection.SelectedNode {
+ func (state *DownloadSelectionCacheState) Nodes(nodes []storj.NodeID) map[storj.NodeID]*nodeselection.SelectedNode {
- xs := make(map[storj.NodeID]*uploadselection.SelectedNode, len(nodes))
+ xs := make(map[storj.NodeID]*nodeselection.SelectedNode, len(nodes))
for _, nodeID := range nodes {
if n, exists := state.byID[nodeID]; exists {
xs[nodeID] = n.Clone() // TODO: optimize the clones
@@ -16,7 +16,7 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/satellite"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
@@ -88,7 +88,7 @@ func TestDownloadSelectionCacheState_IPs(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()

- node := &uploadselection.SelectedNode{
+ node := &nodeselection.SelectedNode{
ID: testrand.NodeID(),
Address: &pb.NodeAddress{
Address: "1.0.1.1:8080",
@@ -97,7 +97,7 @@ func TestDownloadSelectionCacheState_IPs(t *testing.T) {
LastIPPort: "1.0.1.1:8080",
}

- state := overlay.NewDownloadSelectionCacheState([]*uploadselection.SelectedNode{node})
+ state := overlay.NewDownloadSelectionCacheState([]*nodeselection.SelectedNode{node})
require.Equal(t, state.Size(), 1)

ips := state.IPs([]storj.NodeID{testrand.NodeID(), node.ID})
@@ -6,27 +6,27 @@ package overlay
import (
"storj.io/common/storj"
"storj.io/common/storj/location"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
)

// PlacementRules can crate filter based on the placement identifier.
- type PlacementRules func(constraint storj.PlacementConstraint) (filter uploadselection.NodeFilters)
+ type PlacementRules func(constraint storj.PlacementConstraint) (filter nodeselection.NodeFilters)

// ConfigurablePlacementRule can include the placement definitions for each known identifier.
type ConfigurablePlacementRule struct {
- placements map[storj.PlacementConstraint]uploadselection.NodeFilters
+ placements map[storj.PlacementConstraint]nodeselection.NodeFilters
}

// NewPlacementRules creates a fully initialized NewPlacementRules.
func NewPlacementRules() *ConfigurablePlacementRule {
return &ConfigurablePlacementRule{
- placements: map[storj.PlacementConstraint]uploadselection.NodeFilters{},
+ placements: map[storj.PlacementConstraint]nodeselection.NodeFilters{},
}
}

// AddLegacyStaticRules initializes all the placement rules defined earlier in static golang code.
func (d *ConfigurablePlacementRule) AddLegacyStaticRules() {
- d.placements[storj.EEA] = uploadselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
+ d.placements[storj.EEA] = nodeselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
for _, c := range location.EeaNonEuCountries {
if c == isoCountryCode {
return true
@@ -39,7 +39,7 @@ func (d *ConfigurablePlacementRule) AddLegacyStaticRules() {
}
return false
})
- d.placements[storj.EU] = uploadselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
+ d.placements[storj.EU] = nodeselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
for _, c := range location.EuCountries {
if c == isoCountryCode {
return true
@@ -47,39 +47,39 @@ func (d *ConfigurablePlacementRule) AddLegacyStaticRules() {
}
return false
})
- d.placements[storj.US] = uploadselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
+ d.placements[storj.US] = nodeselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
return isoCountryCode == location.UnitedStates
})
- d.placements[storj.DE] = uploadselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
+ d.placements[storj.DE] = nodeselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
return isoCountryCode == location.Germany
})
- d.placements[storj.NR] = uploadselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
+ d.placements[storj.NR] = nodeselection.NodeFilters{}.WithCountryFilter(func(isoCountryCode location.CountryCode) bool {
return isoCountryCode != location.Russia && isoCountryCode != location.Belarus
})
}

// AddPlacementRule registers a new placement.
- func (d *ConfigurablePlacementRule) AddPlacementRule(id storj.PlacementConstraint, filters uploadselection.NodeFilters) {
+ func (d *ConfigurablePlacementRule) AddPlacementRule(id storj.PlacementConstraint, filters nodeselection.NodeFilters) {
d.placements[id] = filters
}

// CreateFilters implements PlacementCondition.
- func (d *ConfigurablePlacementRule) CreateFilters(constraint storj.PlacementConstraint) (filter uploadselection.NodeFilters) {
+ func (d *ConfigurablePlacementRule) CreateFilters(constraint storj.PlacementConstraint) (filter nodeselection.NodeFilters) {
if constraint == 0 {
- return uploadselection.NodeFilters{}
+ return nodeselection.NodeFilters{}
}
if filters, found := d.placements[constraint]; found {
return filters
}
- return uploadselection.ExcludeAll
+ return nodeselection.ExcludeAll
}

// CreateDefaultPlacementRules returns with a default set of configured placement rules.
func CreateDefaultPlacementRules(satelliteID storj.NodeID) PlacementRules {
placement := NewPlacementRules()
placement.AddLegacyStaticRules()
- placement.AddPlacementRule(10, uploadselection.NodeFilters{
+ placement.AddPlacementRule(10, nodeselection.NodeFilters{
- uploadselection.NewTagFilter(satelliteID, "selection", []byte("true")),
+ nodeselection.NewTagFilter(satelliteID, "selection", []byte("true")),
})
return placement.CreateFilters
}
@@ -26,7 +26,7 @@ import (
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation"
)
@@ -148,10 +148,10 @@ func TestOnlineOffline(t *testing.T) {
require.Empty(t, offline)
require.Len(t, online, 2)

- require.False(t, slices.ContainsFunc(online, func(node uploadselection.SelectedNode) bool {
+ require.False(t, slices.ContainsFunc(online, func(node nodeselection.SelectedNode) bool {
return node.ID == unreliableNodeID
}))
- require.False(t, slices.ContainsFunc(offline, func(node uploadselection.SelectedNode) bool {
+ require.False(t, slices.ContainsFunc(offline, func(node nodeselection.SelectedNode) bool {
return node.ID == unreliableNodeID
}))
})
@@ -193,7 +193,7 @@ func TestEnsureMinimumRequested(t *testing.T) {

reputable := map[storj.NodeID]bool{}

- countReputable := func(selected []*uploadselection.SelectedNode) (count int) {
+ countReputable := func(selected []*nodeselection.SelectedNode) (count int) {
for _, n := range selected {
if reputable[n.ID] {
count++
@@ -21,7 +21,7 @@ import (
"storj.io/storj/satellite/geoip"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/nodeevents"
- "storj.io/storj/satellite/nodeselection/uploadselection"
+ "storj.io/storj/satellite/nodeselection"
)

// ErrEmptyNode is returned when the nodeID is empty.
@@ -54,20 +54,20 @@ type DB interface {
// current reputation status.
GetOnlineNodesForAuditRepair(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration) (map[storj.NodeID]*NodeReputation, error)
// SelectStorageNodes looks up nodes based on criteria
- SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*uploadselection.SelectedNode, error)
+ SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*nodeselection.SelectedNode, error)
// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
- SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*uploadselection.SelectedNode, err error)
+ SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error)
// SelectAllStorageNodesDownload returns a nodes that are ready for downloading
- SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*uploadselection.SelectedNode, error)
+ SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)

// Get looks up the node by nodeID
Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
// KnownReliableInExcludedCountries filters healthy nodes that are in excluded countries.
KnownReliableInExcludedCountries(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
- KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error)
+ KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error)
// Reliable returns all nodes that are reliable (separated by whether they are currently online or offline).
- Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error)
+ Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error)
// UpdateReputation updates the DB columns for all reputation fields in ReputationStatus.
UpdateReputation(ctx context.Context, id storj.NodeID, request ReputationUpdate) error
// UpdateNodeInfo updates node dossier with info requested from the node itself like node type, email, wallet, capacity, and version.
@ -132,15 +132,15 @@ type DB interface {
|
|||||||
OneTimeFixLastNets(ctx context.Context) error
|
OneTimeFixLastNets(ctx context.Context) error
|
||||||
|
|
||||||
// IterateAllContactedNodes will call cb on all known nodes (used in restore trash contexts).
|
// IterateAllContactedNodes will call cb on all known nodes (used in restore trash contexts).
|
||||||
IterateAllContactedNodes(context.Context, func(context.Context, *uploadselection.SelectedNode) error) error
|
IterateAllContactedNodes(context.Context, func(context.Context, *nodeselection.SelectedNode) error) error
|
||||||
// IterateAllNodeDossiers will call cb on all known nodes (used for invoice generation).
|
// IterateAllNodeDossiers will call cb on all known nodes (used for invoice generation).
|
||||||
IterateAllNodeDossiers(context.Context, func(context.Context, *NodeDossier) error) error
|
IterateAllNodeDossiers(context.Context, func(context.Context, *NodeDossier) error) error
|
||||||
|
|
||||||
// UpdateNodeTags insert (or refresh) node tags.
|
// UpdateNodeTags insert (or refresh) node tags.
|
||||||
UpdateNodeTags(ctx context.Context, tags uploadselection.NodeTags) error
|
UpdateNodeTags(ctx context.Context, tags nodeselection.NodeTags) error
|
||||||
|
|
||||||
// GetNodeTags returns all nodes for a specific node.
|
// GetNodeTags returns all nodes for a specific node.
|
||||||
GetNodeTags(ctx context.Context, id storj.NodeID) (uploadselection.NodeTags, error)
|
GetNodeTags(ctx context.Context, id storj.NodeID) (nodeselection.NodeTags, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DisqualificationReason is disqualification reason enum type.
|
// DisqualificationReason is disqualification reason enum type.
|
||||||
@ -324,7 +324,7 @@ func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
defaultSelection := uploadselection.NodeFilters{}
|
defaultSelection := nodeselection.NodeFilters{}
|
||||||
|
|
||||||
if len(config.Node.UploadExcludedCountryCodes) > 0 {
|
if len(config.Node.UploadExcludedCountryCodes) > 0 {
|
||||||
defaultSelection = defaultSelection.WithCountryFilter(func(code location.CountryCode) bool {
|
defaultSelection = defaultSelection.WithCountryFilter(func(code location.CountryCode) bool {
|
||||||
@ -395,7 +395,7 @@ func (service *Service) Get(ctx context.Context, nodeID storj.NodeID) (_ *NodeDo
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CachedGetOnlineNodesForGet returns a map of nodes from the download selection cache from the suppliedIDs.
|
// CachedGetOnlineNodesForGet returns a map of nodes from the download selection cache from the suppliedIDs.
|
||||||
func (service *Service) CachedGetOnlineNodesForGet(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*uploadselection.SelectedNode, err error) {
|
func (service *Service) CachedGetOnlineNodesForGet(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
return service.DownloadSelectionCache.GetNodes(ctx, nodeIDs)
|
return service.DownloadSelectionCache.GetNodes(ctx, nodeIDs)
|
||||||
}
|
}
|
||||||
@ -419,7 +419,7 @@ func (service *Service) IsOnline(node *NodeDossier) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FindStorageNodesForGracefulExit searches the overlay network for nodes that meet the provided requirements for graceful-exit requests.
|
// FindStorageNodesForGracefulExit searches the overlay network for nodes that meet the provided requirements for graceful-exit requests.
|
||||||
func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req FindStorageNodesRequest) (_ []*uploadselection.SelectedNode, err error) {
|
func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req FindStorageNodesRequest) (_ []*nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
return service.UploadSelectionCache.GetNodes(ctx, req)
|
return service.UploadSelectionCache.GetNodes(ctx, req)
|
||||||
}
|
}
|
||||||
@ -428,7 +428,7 @@ func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req
|
|||||||
//
|
//
|
||||||
// When enabled it uses the cache to select nodes.
|
// When enabled it uses the cache to select nodes.
|
||||||
// When the node selection from the cache fails, it falls back to the old implementation.
|
// When the node selection from the cache fails, it falls back to the old implementation.
|
||||||
func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindStorageNodesRequest) (_ []*uploadselection.SelectedNode, err error) {
|
func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindStorageNodesRequest) (_ []*nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
if service.config.Node.AsOfSystemTime.Enabled && service.config.Node.AsOfSystemTime.DefaultInterval < 0 {
|
if service.config.Node.AsOfSystemTime.Enabled && service.config.Node.AsOfSystemTime.DefaultInterval < 0 {
|
||||||
req.AsOfSystemInterval = service.config.Node.AsOfSystemTime.DefaultInterval
|
req.AsOfSystemInterval = service.config.Node.AsOfSystemTime.DefaultInterval
|
||||||
@ -464,7 +464,7 @@ func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindS
|
|||||||
// FindStorageNodesWithPreferences searches the overlay network for nodes that meet the provided criteria.
|
// FindStorageNodesWithPreferences searches the overlay network for nodes that meet the provided criteria.
|
||||||
//
|
//
|
||||||
// This does not use a cache.
|
// This does not use a cache.
|
||||||
func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*uploadselection.SelectedNode, err error) {
|
func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
// TODO: add sanity limits to requested node count
|
// TODO: add sanity limits to requested node count
|
||||||
// TODO: add sanity limits to excluded nodes
|
// TODO: add sanity limits to excluded nodes
|
||||||
@ -550,7 +550,7 @@ func (service *Service) KnownReliableInExcludedCountries(ctx context.Context, no
|
|||||||
}
|
}
|
||||||
|
|
||||||
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
|
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
|
||||||
func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (onlineNodes []uploadselection.SelectedNode, offlineNodes []uploadselection.SelectedNode, err error) {
|
func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (onlineNodes []nodeselection.SelectedNode, offlineNodes []nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
|
|
||||||
// TODO add as of system time
|
// TODO add as of system time
|
||||||
@ -558,7 +558,7 @@ func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDL
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Reliable returns all nodes that are reliable (separated by whether they are currently online or offline).
|
// Reliable returns all nodes that are reliable (separated by whether they are currently online or offline).
|
||||||
func (service *Service) Reliable(ctx context.Context) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error) {
|
func (service *Service) Reliable(ctx context.Context) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
|
|
||||||
// TODO add as of system tim.
|
// TODO add as of system tim.
|
||||||
@ -803,7 +803,7 @@ func (service *Service) DisqualifyNode(ctx context.Context, nodeID storj.NodeID,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SelectAllStorageNodesDownload returns a nodes that are ready for downloading.
|
// SelectAllStorageNodesDownload returns a nodes that are ready for downloading.
|
||||||
func (service *Service) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) (_ []*uploadselection.SelectedNode, err error) {
|
func (service *Service) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) (_ []*nodeselection.SelectedNode, err error) {
|
||||||
defer mon.Task()(&ctx)(&err)
|
defer mon.Task()(&ctx)(&err)
|
||||||
return service.db.SelectAllStorageNodesDownload(ctx, onlineWindow, asOf)
|
return service.db.SelectAllStorageNodesDownload(ctx, onlineWindow, asOf)
|
||||||
}
|
}
|
||||||
@ -815,12 +815,12 @@ func (service *Service) ResolveIPAndNetwork(ctx context.Context, target string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateNodeTags persists all new and old node tags.
|
// UpdateNodeTags persists all new and old node tags.
|
||||||
func (service *Service) UpdateNodeTags(ctx context.Context, tags []uploadselection.NodeTag) error {
|
func (service *Service) UpdateNodeTags(ctx context.Context, tags []nodeselection.NodeTag) error {
|
||||||
return service.db.UpdateNodeTags(ctx, tags)
|
return service.db.UpdateNodeTags(ctx, tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetNodeTags returns the node tags of a node.
|
// GetNodeTags returns the node tags of a node.
|
||||||
func (service *Service) GetNodeTags(ctx context.Context, id storj.NodeID) (uploadselection.NodeTags, error) {
|
func (service *Service) GetNodeTags(ctx context.Context, id storj.NodeID) (nodeselection.NodeTags, error) {
|
||||||
return service.db.GetNodeTags(ctx, id)
|
return service.db.GetNodeTags(ctx, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
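For orientation, a minimal caller-side sketch of the renamed types (not part of this change; it assumes the Service.KnownReliable signature shown above, with SelectedNode now coming from the nodeselection package):

	package example // hypothetical helper, for illustration only

	import (
		"context"

		"storj.io/common/storj"
		"storj.io/storj/satellite/overlay"
	)

	// onlineNodeIDs keeps only the IDs that KnownReliable reports as currently online.
	func onlineNodeIDs(ctx context.Context, service *overlay.Service, ids storj.NodeIDList) (storj.NodeIDList, error) {
		online, _, err := service.KnownReliable(ctx, ids)
		if err != nil {
			return nil, err
		}
		result := make(storj.NodeIDList, 0, len(online))
		for _, node := range online {
			result = append(result, node.ID)
		}
		return result, nil
	}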
@@ -23,7 +23,7 @@ import (
 	"storj.io/storj/private/testplanet"
 	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/nodeevents"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/reputation"
 	"storj.io/storj/satellite/satellitedb/satellitedbtest"
@@ -205,7 +205,7 @@ func TestRandomizedSelection(t *testing.T) {

 	// select numNodesToSelect nodes selectIterations times
 	for i := 0; i < selectIterations; i++ {
-		var nodes []*uploadselection.SelectedNode
+		var nodes []*nodeselection.SelectedNode
 		var err error

 		if i%2 == 0 {
@@ -326,7 +326,7 @@ func TestRandomizedSelectionCache(t *testing.T) {

 	// select numNodesToSelect nodes selectIterations times
 	for i := 0; i < selectIterations; i++ {
-		var nodes []*uploadselection.SelectedNode
+		var nodes []*nodeselection.SelectedNode
 		var err error
 		req := overlay.FindStorageNodesRequest{
 			RequestedCount: numNodesToSelect,
@@ -670,7 +670,7 @@ func TestSuspendedSelection(t *testing.T) {
 		}
 	}

-	var nodes []*uploadselection.SelectedNode
+	var nodes []*nodeselection.SelectedNode
 	var err error

 	numNodesToSelect := 10
@@ -18,7 +18,7 @@ import (
 	"storj.io/common/storj/location"
 	"storj.io/common/testcontext"
 	"storj.io/storj/satellite"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/satellitedb/satellitedbtest"
 )
@@ -102,8 +102,8 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
 		storj.NodeID{7}, storj.NodeID{8},
 		storj.NodeID{9},
 	}
-	contains := func(nodeID storj.NodeID) func(node uploadselection.SelectedNode) bool {
-		return func(node uploadselection.SelectedNode) bool {
+	contains := func(nodeID storj.NodeID) func(node nodeselection.SelectedNode) bool {
+		return func(node nodeselection.SelectedNode) bool {
 			return node.ID == nodeID
 		}
 	}
@@ -10,7 +10,7 @@ import (
 	"go.uber.org/zap"

 	"storj.io/common/sync2"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 )

 // UploadSelectionDB implements the database for upload selection cache.
@@ -18,7 +18,7 @@ import (
 // architecture: Database
 type UploadSelectionDB interface {
 	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
-	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*uploadselection.SelectedNode, err error)
+	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error)
 }

 // UploadSelectionCacheConfig is a configuration for upload selection cache.
@@ -35,14 +35,14 @@ type UploadSelectionCache struct {
 	db UploadSelectionDB
 	selectionConfig NodeSelectionConfig

-	cache sync2.ReadCacheOf[*uploadselection.State]
+	cache sync2.ReadCacheOf[*nodeselection.State]

-	defaultFilters uploadselection.NodeFilters
+	defaultFilters nodeselection.NodeFilters
 	placementRules PlacementRules
 }

 // NewUploadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data.
-func NewUploadSelectionCache(log *zap.Logger, db UploadSelectionDB, staleness time.Duration, config NodeSelectionConfig, defaultFilter uploadselection.NodeFilters, placementRules PlacementRules) (*UploadSelectionCache, error) {
+func NewUploadSelectionCache(log *zap.Logger, db UploadSelectionDB, staleness time.Duration, config NodeSelectionConfig, defaultFilter nodeselection.NodeFilters, placementRules PlacementRules) (*UploadSelectionCache, error) {
 	cache := &UploadSelectionCache{
 		log: log,
 		db: db,
@@ -69,7 +69,7 @@ func (cache *UploadSelectionCache) Refresh(ctx context.Context) (err error) {
 // refresh calls out to the database and refreshes the cache with the most up-to-date
 // data from the nodes table, then sets time that the last refresh occurred so we know when
 // to refresh again in the future.
-func (cache *UploadSelectionCache) read(ctx context.Context) (_ *uploadselection.State, err error) {
+func (cache *UploadSelectionCache) read(ctx context.Context) (_ *nodeselection.State, err error) {
 	defer mon.Task()(&ctx)(&err)

 	reputableNodes, newNodes, err := cache.db.SelectAllStorageNodesUpload(ctx, cache.selectionConfig)
@@ -77,7 +77,7 @@ func (cache *UploadSelectionCache) read(ctx context.Context) (_ *uploadselection
 		return nil, Error.Wrap(err)
 	}

-	state := uploadselection.NewState(reputableNodes, newNodes)
+	state := nodeselection.NewState(reputableNodes, newNodes)

 	mon.IntVal("refresh_cache_size_reputable").Observe(int64(len(reputableNodes)))
 	mon.IntVal("refresh_cache_size_new").Observe(int64(len(newNodes)))
@@ -88,7 +88,7 @@ func (cache *UploadSelectionCache) read(ctx context.Context) (_ *uploadselection
 // GetNodes selects nodes from the cache that will be used to upload a file.
 // Every node selected will be from a distinct network.
 // If the cache hasn't been refreshed recently it will do so first.
-func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*uploadselection.SelectedNode, err error) {
+func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	state, err := cache.cache.Get(ctx, time.Now())
@@ -104,12 +104,12 @@ func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorage
 	filters = append(filters, cache.defaultFilters)
 	filters = filters.WithAutoExcludeSubnets()

-	selected, err := state.Select(ctx, uploadselection.Request{
+	selected, err := state.Select(ctx, nodeselection.Request{
 		Count: req.RequestedCount,
 		NewFraction: cache.selectionConfig.NewNodeFraction,
 		NodeFilters: filters,
 	})
-	if uploadselection.ErrNotEnoughNodes.Has(err) {
+	if nodeselection.ErrNotEnoughNodes.Has(err) {
 		err = ErrNotEnoughNodes.Wrap(err)
 	}
 	return selected, err
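As a rough usage sketch (not part of the commit; it assumes the GetNodes signature and the FindStorageNodesRequest.RequestedCount field shown in the hunks above), a caller asks the cache for upload candidates like this:

	package example // hypothetical caller, for illustration only

	import (
		"context"

		"storj.io/storj/satellite/nodeselection"
		"storj.io/storj/satellite/overlay"
	)

	// pickUploadNodes requests n candidate nodes from the upload selection cache;
	// every returned node is from a distinct network, per the GetNodes contract above.
	func pickUploadNodes(ctx context.Context, cache *overlay.UploadSelectionCache, n int) ([]*nodeselection.SelectedNode, error) {
		return cache.GetNodes(ctx, overlay.FindStorageNodesRequest{
			RequestedCount: n,
		})
	}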
@@ -27,7 +27,7 @@ import (
 	"storj.io/common/testrand"
 	"storj.io/storj/private/testplanet"
 	"storj.io/storj/satellite"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/satellitedb/satellitedbtest"
 )
@@ -63,7 +63,7 @@ func TestRefresh(t *testing.T) {
 		db.OverlayCache(),
 		lowStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -134,21 +134,21 @@ func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, coun
 type mockdb struct {
 	mu sync.Mutex
 	callCount int
-	reputable []*uploadselection.SelectedNode
-	new []*uploadselection.SelectedNode
+	reputable []*nodeselection.SelectedNode
+	new []*nodeselection.SelectedNode
 }

-func (m *mockdb) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*uploadselection.SelectedNode, err error) {
+func (m *mockdb) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 	sync2.Sleep(ctx, 500*time.Millisecond)
 	m.callCount++

-	reputable = make([]*uploadselection.SelectedNode, len(m.reputable))
+	reputable = make([]*nodeselection.SelectedNode, len(m.reputable))
 	for i, n := range m.reputable {
 		reputable[i] = n.Clone()
 	}
-	new = make([]*uploadselection.SelectedNode, len(m.new))
+	new = make([]*nodeselection.SelectedNode, len(m.new))
 	for i, n := range m.new {
 		new[i] = n.Clone()
 	}
@@ -167,7 +167,7 @@ func TestRefreshConcurrent(t *testing.T) {
 		&mockDB,
 		highStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -194,7 +194,7 @@ func TestRefreshConcurrent(t *testing.T) {
 		&mockDB,
 		lowStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -224,7 +224,7 @@ func TestGetNodes(t *testing.T) {
 		db.OverlayCache(),
 		lowStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -289,13 +289,13 @@ func TestGetNodesConcurrent(t *testing.T) {
 	ctx := testcontext.New(t)
 	defer ctx.Cleanup()

-	reputableNodes := []*uploadselection.SelectedNode{{
+	reputableNodes := []*nodeselection.SelectedNode{{
 		ID: storj.NodeID{1},
 		Address: &pb.NodeAddress{Address: "127.0.0.9"},
 		LastNet: "127.0.0",
 		LastIPPort: "127.0.0.9:8000",
 	}}
-	newNodes := []*uploadselection.SelectedNode{{
+	newNodes := []*nodeselection.SelectedNode{{
 		ID: storj.NodeID{1},
 		Address: &pb.NodeAddress{Address: "127.0.0.10"},
 		LastNet: "127.0.0",
@@ -312,7 +312,7 @@ func TestGetNodesConcurrent(t *testing.T) {
 		&mockDB,
 		highStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -359,7 +359,7 @@ func TestGetNodesConcurrent(t *testing.T) {
 		&mockDB,
 		lowStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -398,7 +398,7 @@ func TestGetNodesDistinct(t *testing.T) {
 	ctx := testcontext.New(t)
 	defer ctx.Cleanup()

-	reputableNodes := []*uploadselection.SelectedNode{{
+	reputableNodes := []*nodeselection.SelectedNode{{
 		ID: testrand.NodeID(),
 		Address: &pb.NodeAddress{Address: "127.0.0.9"},
 		LastNet: "127.0.0",
@@ -420,7 +420,7 @@ func TestGetNodesDistinct(t *testing.T) {
 		LastIPPort: "127.0.2.7:8000",
 	}}

-	newNodes := []*uploadselection.SelectedNode{{
+	newNodes := []*nodeselection.SelectedNode{{
 		ID: testrand.NodeID(),
 		Address: &pb.NodeAddress{Address: "127.0.0.10"},
 		LastNet: "127.0.0",
@@ -451,7 +451,7 @@ func TestGetNodesDistinct(t *testing.T) {
 		&mockDB,
 		highStaleness,
 		config,
-		uploadselection.NodeFilters{}.WithAutoExcludeSubnets(),
+		nodeselection.NodeFilters{}.WithAutoExcludeSubnets(),
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -480,7 +480,7 @@ func TestGetNodesDistinct(t *testing.T) {

 	{ // test that distinctIP=true allows selecting 6 nodes
 		// emulate DistinctIP=false behavior by filling in LastNets with unique addresses
-		for _, nodeList := range [][]*uploadselection.SelectedNode{reputableNodes, newNodes} {
+		for _, nodeList := range [][]*nodeselection.SelectedNode{reputableNodes, newNodes} {
 			for i := range nodeList {
 				nodeList[i].LastNet = nodeList[i].LastIPPort
 			}
@@ -492,7 +492,7 @@ func TestGetNodesDistinct(t *testing.T) {
 		&mockDB,
 		highStaleness,
 		config,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -517,7 +517,7 @@ func TestGetNodesError(t *testing.T) {
 		&mockDB,
 		highStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -552,7 +552,7 @@ func TestNewNodeFraction(t *testing.T) {
 		db.OverlayCache(),
 		lowStaleness,
 		nodeSelectionConfig,
-		uploadselection.NodeFilters{},
+		nodeselection.NodeFilters{},
 		overlay.NewPlacementRules().CreateFilters,
 	)
 	require.NoError(t, err)
@@ -613,7 +613,7 @@ func BenchmarkGetNodes(b *testing.B) {
 	require.NoError(b, err)
 	placement := overlay.NewPlacementRules()
 	placement.AddLegacyStaticRules()
-	defaultFilter := uploadselection.NodeFilters{}
+	defaultFilter := nodeselection.NodeFilters{}

 	db := NewMockUploadSelectionDb(
 		generatedSelectedNodes(b, oldNodes),
@@ -640,12 +640,12 @@ func BenchmarkGetNodes(b *testing.B) {

 // MockUploadSelection implements overlay.UploadSelectionDB with a static list.
 type MockUploadSelectionDB struct {
-	new []*uploadselection.SelectedNode
-	reputable []*uploadselection.SelectedNode
+	new []*nodeselection.SelectedNode
+	reputable []*nodeselection.SelectedNode
 }

 // NewMockUploadSelectionDb creates a MockUploadSelectionDB with the given reputable and new nodes.
-func NewMockUploadSelectionDb(reputable, new []*uploadselection.SelectedNode) *MockUploadSelectionDB {
+func NewMockUploadSelectionDb(reputable, new []*nodeselection.SelectedNode) *MockUploadSelectionDB {
 	return &MockUploadSelectionDB{
 		new: new,
 		reputable: reputable,
@@ -654,17 +654,17 @@ func NewMockUploadSelectionDb(reputable, new []*uploadselection.SelectedNode) *M
 }

 // SelectAllStorageNodesUpload implements overlay.UploadSelectionDB.
-func (m MockUploadSelectionDB) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*uploadselection.SelectedNode, err error) {
+func (m MockUploadSelectionDB) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error) {
 	return m.reputable, m.new, nil
 }

 var _ overlay.UploadSelectionDB = &MockUploadSelectionDB{}

-func generatedSelectedNodes(b *testing.B, nodeNo int) []*uploadselection.SelectedNode {
-	nodes := make([]*uploadselection.SelectedNode, nodeNo)
+func generatedSelectedNodes(b *testing.B, nodeNo int) []*nodeselection.SelectedNode {
+	nodes := make([]*nodeselection.SelectedNode, nodeNo)
 	ctx := testcontext.New(b)
 	for i := 0; i < nodeNo; i++ {
-		node := uploadselection.SelectedNode{}
+		node := nodeselection.SelectedNode{}
 		identity, err := testidentity.NewTestIdentity(ctx)
 		require.NoError(b, err)
 		node.ID = identity.ID
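The tests above construct filter chains directly; a small sketch of the same composition (illustrative only; it assumes the NodeFilters methods visible in this diff, WithAutoExcludeSubnets and WithCountryFilter, both returning NodeFilters):

	package example // hypothetical composition, for illustration only

	import (
		"storj.io/common/storj/location"
		"storj.io/storj/satellite/nodeselection"
	)

	// uploadFilters de-duplicates subnets and rejects nodes from the excluded countries.
	func uploadFilters(excluded map[location.CountryCode]bool) nodeselection.NodeFilters {
		filters := nodeselection.NodeFilters{}.WithAutoExcludeSubnets()
		return filters.WithCountryFilter(func(code location.CountryCode) bool {
			return !excluded[code]
		})
	}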
@@ -20,7 +20,7 @@ import (
 	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/metabase"
 	"storj.io/storj/satellite/nodeevents"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/repair/checker"
 )
@@ -60,8 +60,8 @@ func TestReliabilityCache_Concurrent(t *testing.T) {
 type fakeOverlayDB struct{ overlay.DB }
 type fakeNodeEvents struct{ nodeevents.DB }

-func (fakeOverlayDB) Reliable(context.Context, time.Duration, time.Duration) ([]uploadselection.SelectedNode, []uploadselection.SelectedNode, error) {
-	return []uploadselection.SelectedNode{
+func (fakeOverlayDB) Reliable(context.Context, time.Duration, time.Duration) ([]nodeselection.SelectedNode, []nodeselection.SelectedNode, error) {
+	return []nodeselection.SelectedNode{
 		{ID: testrand.NodeID()},
 		{ID: testrand.NodeID()},
 		{ID: testrand.NodeID()},
@@ -21,7 +21,7 @@ import (
 	"storj.io/common/sync2"
 	"storj.io/storj/satellite/audit"
 	"storj.io/storj/satellite/metabase"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/orders"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/repair"
@@ -682,7 +682,7 @@ func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segm

 	reliablePieces := metabase.Pieces{}

-	collectLastNets := func(reliable []uploadselection.SelectedNode) {
+	collectLastNets := func(reliable []nodeselection.SelectedNode) {
 		for _, node := range reliable {
 			pieceNum := nodeIDPieceMap[node.ID]
 			reliablePieces = append(reliablePieces, metabase.Piece{
@@ -705,7 +705,7 @@ func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segm
 	if repairer.doPlacementCheck && segment.Placement != storj.EveryCountry {
 		result.OutOfPlacementPiecesSet = map[uint16]bool{}

-		checkPlacement := func(reliable []uploadselection.SelectedNode) {
+		checkPlacement := func(reliable []nodeselection.SelectedNode) {
 			for _, node := range reliable {
 				if segment.Placement.AllowedCountry(node.CountryCode) {
 					continue
@@ -16,11 +16,11 @@ import (
 	"storj.io/common/storj"
 	"storj.io/private/dbutil/pgutil"
 	"storj.io/private/version"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 )

-func (cache *overlaycache) SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *overlay.NodeCriteria) (nodes []*uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *overlay.NodeCriteria) (nodes []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)
 	if totalNeededNodes == 0 {
 		return nil, nil
@@ -87,7 +87,7 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, totalNeededNo
 	return nodes, nil
 }

-func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputableNodeCount, newNodeCount int, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID, excludedNetworks []string) (reputableNodes, newNodes []*uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputableNodeCount, newNodeCount int, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID, excludedNetworks []string) (reputableNodes, newNodes []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	newNodesCondition, err := nodeSelectionCondition(ctx, criteria, excludedIDs, excludedNetworks, true)
@@ -128,7 +128,7 @@ func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputable
 	defer func() { err = errs.Combine(err, rows.Close()) }()

 	for rows.Next() {
-		var node uploadselection.SelectedNode
+		var node nodeselection.SelectedNode
 		node.Address = &pb.NodeAddress{}
 		var lastIPPort sql.NullString
 		var isNew bool
@@ -23,7 +23,7 @@ import (
 	"storj.io/private/dbutil/pgutil"
 	"storj.io/private/tagsql"
 	"storj.io/private/version"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/satellitedb/dbx"
 )
@@ -39,7 +39,7 @@ type overlaycache struct {
 }

 // SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes.
-func (cache *overlaycache) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) SelectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error) {
 	for {
 		reputable, new, err = cache.selectAllStorageNodesUpload(ctx, selectionCfg)
 		if err != nil {
@@ -60,7 +60,7 @@ func (cache *overlaycache) SelectAllStorageNodesUpload(ctx context.Context, sele
 	return reputable, new, err
 }

-func (cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	query := `
@@ -101,10 +101,10 @@ func (cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, sele
 	}
 	defer func() { err = errs.Combine(err, rows.Close()) }()

-	var reputableNodes []*uploadselection.SelectedNode
-	var newNodes []*uploadselection.SelectedNode
+	var reputableNodes []*nodeselection.SelectedNode
+	var newNodes []*nodeselection.SelectedNode
 	for rows.Next() {
-		var node uploadselection.SelectedNode
+		var node nodeselection.SelectedNode
 		node.Address = &pb.NodeAddress{}
 		var lastIPPort sql.NullString
 		var vettedAt *time.Time
@@ -130,7 +130,7 @@ func (cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, sele
 }

 // SelectAllStorageNodesDownload returns all nodes that qualify to store data, organized as reputable nodes and new nodes.
-func (cache *overlaycache) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (nodes []*uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (nodes []*nodeselection.SelectedNode, err error) {
 	for {
 		nodes, err = cache.selectAllStorageNodesDownload(ctx, onlineWindow, asOf)
 		if err != nil {
@@ -151,7 +151,7 @@ func (cache *overlaycache) SelectAllStorageNodesDownload(ctx context.Context, on
 	return nodes, err
 }

-func (cache *overlaycache) selectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOfConfig overlay.AsOfSystemTimeConfig) (_ []*uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) selectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOfConfig overlay.AsOfSystemTimeConfig) (_ []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	query := `
@@ -173,9 +173,9 @@ func (cache *overlaycache) selectAllStorageNodesDownload(ctx context.Context, on
 	}
 	defer func() { err = errs.Combine(err, rows.Close()) }()

-	var nodes []*uploadselection.SelectedNode
+	var nodes []*nodeselection.SelectedNode
 	for rows.Next() {
-		var node uploadselection.SelectedNode
+		var node nodeselection.SelectedNode
 		node.Address = &pb.NodeAddress{}
 		var lastIPPort sql.NullString
 		var noise noiseScanner
@@ -461,7 +461,7 @@ func (cache *overlaycache) knownReliableInExcludedCountries(ctx context.Context,
 }

 // KnownReliable filters a set of nodes to reliable nodes. List is split into online and offline nodes.
-func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
 	for {
 		online, offline, err = cache.knownReliable(ctx, nodeIDs, onlineWindow, asOfSystemInterval)
 		if err != nil {
@@ -476,7 +476,7 @@ func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.Node
 	return online, offline, err
 }

-func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	if len(nodeIDs) == 0 {
@@ -513,7 +513,7 @@ func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.Node
 }

 // Reliable returns all nodes that are reliable, online and offline.
-func (cache *overlaycache) Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
 	for {
 		online, offline, err = cache.reliable(ctx, onlineWindow, asOfSystemInterval)
 		if err != nil {
@@ -528,7 +528,7 @@ func (cache *overlaycache) Reliable(ctx context.Context, onlineWindow, asOfSyste
 	return online, offline, nil
 }

-func (cache *overlaycache) reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []uploadselection.SelectedNode, offline []uploadselection.SelectedNode, err error) {
+func (cache *overlaycache) reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	err = withRows(cache.db.Query(ctx, `
@@ -559,14 +559,14 @@ func (cache *overlaycache) reliable(ctx context.Context, onlineWindow, asOfSyste
 	return online, offline, Error.Wrap(err)
 }

-func scanSelectedNode(rows tagsql.Rows) (uploadselection.SelectedNode, bool, error) {
+func scanSelectedNode(rows tagsql.Rows) (nodeselection.SelectedNode, bool, error) {
 	var onlineNode bool
-	var node uploadselection.SelectedNode
+	var node nodeselection.SelectedNode
 	node.Address = &pb.NodeAddress{}
 	var lastIPPort sql.NullString
 	err := rows.Scan(&node.ID, &node.Address.Address, &node.LastNet, &lastIPPort, &node.CountryCode, &onlineNode)
 	if err != nil {
-		return uploadselection.SelectedNode{}, false, err
+		return nodeselection.SelectedNode{}, false, err
 	}

 	if lastIPPort.Valid {
@@ -1481,7 +1481,7 @@ func (cache *overlaycache) TestNodeCountryCode(ctx context.Context, nodeID storj
 }

 // IterateAllContactedNodes will call cb on all known nodes (used in restore trash contexts).
-func (cache *overlaycache) IterateAllContactedNodes(ctx context.Context, cb func(context.Context, *uploadselection.SelectedNode) error) (err error) {
+func (cache *overlaycache) IterateAllContactedNodes(ctx context.Context, cb func(context.Context, *nodeselection.SelectedNode) error) (err error) {
 	defer mon.Task()(&ctx)(&err)

 	var rows tagsql.Rows
@@ -1497,7 +1497,7 @@ func (cache *overlaycache) IterateAllContactedNodes(ctx context.Context, cb func
 	defer func() { err = errs.Combine(err, rows.Close()) }()

 	for rows.Next() {
-		var node uploadselection.SelectedNode
+		var node nodeselection.SelectedNode
 		node.Address = &pb.NodeAddress{}

 		var lastIPPort sql.NullString
@@ -1600,7 +1600,7 @@ func (cache *overlaycache) OneTimeFixLastNets(ctx context.Context) error {
 	return Error.Wrap(err)
 }

-func (cache *overlaycache) UpdateNodeTags(ctx context.Context, tags uploadselection.NodeTags) error {
+func (cache *overlaycache) UpdateNodeTags(ctx context.Context, tags nodeselection.NodeTags) error {
 	for _, t := range tags {
 		err := cache.db.ReplaceNoReturn_NodeTags(ctx,
 			dbx.NodeTags_NodeId(t.NodeID.Bytes()),
@@ -1616,13 +1616,13 @@ func (cache *overlaycache) UpdateNodeTags(ctx context.Context, tags uploadselect
 	return nil
 }

-func (cache *overlaycache) GetNodeTags(ctx context.Context, id storj.NodeID) (uploadselection.NodeTags, error) {
+func (cache *overlaycache) GetNodeTags(ctx context.Context, id storj.NodeID) (nodeselection.NodeTags, error) {
 	rows, err := cache.db.All_NodeTags_By_NodeId(ctx, dbx.NodeTags_NodeId(id.Bytes()))
 	if err != nil {
 		return nil, Error.Wrap(err)
 	}

-	var tags uploadselection.NodeTags
+	var tags nodeselection.NodeTags
 	for _, row := range rows {
 		nodeIDBytes, err := storj.NodeIDFromBytes(row.NodeId)
 		if err != nil {
@@ -1632,7 +1632,7 @@ func (cache *overlaycache) GetNodeTags(ctx context.Context, id storj.NodeID) (up
 		if err != nil {
 			return tags, Error.Wrap(errs.New("Invalid nodeID in the database: %x", row.NodeId))
 		}
-		tags = append(tags, uploadselection.NodeTag{
+		tags = append(tags, nodeselection.NodeTag{
 			NodeID: nodeIDBytes,
 			Name: row.Name,
 			Value: row.Value,
@@ -1643,13 +1643,13 @@ func (cache *overlaycache) GetNodeTags(ctx context.Context, id storj.NodeID) (up
 	return tags, err
 }

-func (cache *overlaycache) addNodeTags(ctx context.Context, nodes []*uploadselection.SelectedNode) error {
+func (cache *overlaycache) addNodeTags(ctx context.Context, nodes []*nodeselection.SelectedNode) error {
 	rows, err := cache.db.All_NodeTags(ctx)
 	if err != nil {
 		return Error.Wrap(err)
 	}

-	tagsByNode := map[storj.NodeID]uploadselection.NodeTags{}
+	tagsByNode := map[storj.NodeID]nodeselection.NodeTags{}
 	for _, row := range rows {
 		nodeID, err := storj.NodeIDFromBytes(row.NodeId)
 		if err != nil {
@@ -1659,7 +1659,7 @@ func (cache *overlaycache) addNodeTags(ctx context.Context, nodes []*uploadselec
 		if err != nil {
 			return Error.New("Invalid nodeID in the database: %x", row.NodeId)
 		}
-		tagsByNode[nodeID] = append(tagsByNode[nodeID], uploadselection.NodeTag{
+		tagsByNode[nodeID] = append(tagsByNode[nodeID], nodeselection.NodeTag{
 			NodeID: nodeID,
 			Name: row.Name,
 			Value: row.Value,
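For the new tag methods, a round-trip sketch (illustrative only; it assumes the UpdateNodeTags/GetNodeTags signatures and the NodeTag fields shown in this diff, with placeholder tag data):

	package example // hypothetical usage, for illustration only

	import (
		"context"
		"time"

		"storj.io/common/storj"
		"storj.io/storj/satellite/nodeselection"
		"storj.io/storj/satellite/overlay"
	)

	// tagAndReload writes one tag for a node and reads the node's tags back.
	func tagAndReload(ctx context.Context, db overlay.DB, id, signer storj.NodeID) (nodeselection.NodeTags, error) {
		err := db.UpdateNodeTags(ctx, nodeselection.NodeTags{
			nodeselection.NodeTag{
				NodeID:   id,
				Name:     "example-tag",     // placeholder name
				Value:    []byte("example"), // Value assumed to be raw bytes
				SignedAt: time.Now(),
				Signer:   signer,
			},
		})
		if err != nil {
			return nil, err
		}
		return db.GetNodeTags(ctx, id)
	}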
@ -24,7 +24,7 @@ import (
|
|||||||
"storj.io/private/version"
|
"storj.io/private/version"
|
||||||
"storj.io/storj/private/teststorj"
|
"storj.io/storj/private/teststorj"
|
||||||
"storj.io/storj/satellite"
|
"storj.io/storj/satellite"
|
||||||
"storj.io/storj/satellite/nodeselection/uploadselection"
|
"storj.io/storj/satellite/nodeselection"
|
||||||
"storj.io/storj/satellite/overlay"
|
"storj.io/storj/satellite/overlay"
|
||||||
"storj.io/storj/satellite/satellitedb/satellitedbtest"
|
"storj.io/storj/satellite/satellitedb/satellitedbtest"
|
||||||
)
|
)
|
||||||
@ -392,8 +392,8 @@ func TestOverlayCache_SelectAllStorageNodesDownloadUpload(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if n%2 == 0 {
|
if n%2 == 0 {
|
||||||
err = cache.UpdateNodeTags(ctx, uploadselection.NodeTags{
|
err = cache.UpdateNodeTags(ctx, nodeselection.NodeTags{
|
||||||
uploadselection.NodeTag{
|
nodeselection.NodeTag{
|
||||||
NodeID: id,
|
NodeID: id,
|
||||||
SignedAt: time.Now(),
|
SignedAt: time.Now(),
|
||||||
Signer: tagSigner.ID,
|
Signer: tagSigner.ID,
|
||||||
@ -405,8 +405,8 @@ func TestOverlayCache_SelectAllStorageNodesDownloadUpload(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
checkNodes := func(selectedNodes []*uploadselection.SelectedNode) {
|
checkNodes := func(selectedNodes []*nodeselection.SelectedNode) {
|
||||||
selectedNodesMap := map[storj.NodeID]*uploadselection.SelectedNode{}
|
selectedNodesMap := map[storj.NodeID]*nodeselection.SelectedNode{}
|
||||||
for _, node := range selectedNodes {
|
for _, node := range selectedNodes {
|
||||||
selectedNodesMap[node.ID] = node
|
selectedNodesMap[node.ID] = node
|
||||||
}
|
}
|
||||||
@@ -452,7 +452,7 @@ func TestOverlayCache_KnownReliable(t *testing.T) {
 	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
 		cache := db.OverlayCache()
 
-		allNodes := []uploadselection.SelectedNode{
+		allNodes := []nodeselection.SelectedNode{
 			addNode(ctx, t, cache, "online", "127.0.0.1", true, false, false, false, false),
 			addNode(ctx, t, cache, "offline", "127.0.0.2", false, false, false, false, false),
 			addNode(ctx, t, cache, "disqalified", "127.0.0.3", false, true, false, false, false),
@@ -461,7 +461,7 @@ func TestOverlayCache_KnownReliable(t *testing.T) {
 			addNode(ctx, t, cache, "exited", "127.0.0.6", false, false, false, false, true),
 		}
 
-		ids := func(nodes ...uploadselection.SelectedNode) storj.NodeIDList {
+		ids := func(nodes ...nodeselection.SelectedNode) storj.NodeIDList {
 			nodeIds := storj.NodeIDList{}
 			for _, node := range nodes {
 				nodeIds = append(nodeIds, node.ID)
@@ -469,14 +469,14 @@ func TestOverlayCache_KnownReliable(t *testing.T) {
 			return nodeIds
 		}
 
-		nodes := func(nodes ...uploadselection.SelectedNode) []uploadselection.SelectedNode {
-			return append([]uploadselection.SelectedNode{}, nodes...)
+		nodes := func(nodes ...nodeselection.SelectedNode) []nodeselection.SelectedNode {
+			return append([]nodeselection.SelectedNode{}, nodes...)
 		}
 
 		type testCase struct {
 			IDs storj.NodeIDList
-			Online []uploadselection.SelectedNode
-			Offline []uploadselection.SelectedNode
+			Online []nodeselection.SelectedNode
+			Offline []nodeselection.SelectedNode
 		}
 
 		shuffledNodeIDs := ids(allNodes...)
@@ -538,7 +538,7 @@ func TestOverlayCache_Reliable(t *testing.T) {
 	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
 		cache := db.OverlayCache()
 
-		allNodes := []uploadselection.SelectedNode{
+		allNodes := []nodeselection.SelectedNode{
 			addNode(ctx, t, cache, "online", "127.0.0.1", true, false, false, false, false),
 			addNode(ctx, t, cache, "offline", "127.0.0.2", false, false, false, false, false),
 			addNode(ctx, t, cache, "disqalified", "127.0.0.3", false, true, false, false, false),
@@ -549,23 +549,23 @@ func TestOverlayCache_Reliable(t *testing.T) {
 
 		type testCase struct {
 			OnlineWindow time.Duration
-			Online []uploadselection.SelectedNode
-			Offline []uploadselection.SelectedNode
+			Online []nodeselection.SelectedNode
+			Offline []nodeselection.SelectedNode
 		}
 
 		for i, tc := range []testCase{
 			{
 				OnlineWindow: 1 * time.Hour,
-				Online: []uploadselection.SelectedNode{allNodes[0]},
-				Offline: []uploadselection.SelectedNode{allNodes[1]},
+				Online: []nodeselection.SelectedNode{allNodes[0]},
+				Offline: []nodeselection.SelectedNode{allNodes[1]},
 			},
 			{
 				OnlineWindow: 20 * time.Hour,
-				Online: []uploadselection.SelectedNode{allNodes[0], allNodes[1]},
+				Online: []nodeselection.SelectedNode{allNodes[0], allNodes[1]},
 			},
 			{
 				OnlineWindow: 1 * time.Microsecond,
-				Offline: []uploadselection.SelectedNode{allNodes[0], allNodes[1]},
+				Offline: []nodeselection.SelectedNode{allNodes[0], allNodes[1]},
 			},
 		} {
 			online, offline, err := cache.Reliable(ctx, tc.OnlineWindow, 0)
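
Note: the Reliable call itself is untouched by this change; only the package of its result element type moves. A small sketch of a caller, assuming the two return slices hold the renamed SelectedNode values as the test cases above suggest; the helper name is hypothetical and the trailing 0 simply mirrors the test invocation.

package example

import (
	"context"
	"fmt"
	"time"

	"storj.io/storj/satellite/overlay"
)

// countReliable is a hypothetical helper: it calls the same Reliable method the
// test above exercises and reports how many nodes fall on each side of the
// online window. The trailing 0 mirrors the test invocation.
func countReliable(ctx context.Context, cache overlay.DB, window time.Duration) error {
	online, offline, err := cache.Reliable(ctx, window, 0)
	if err != nil {
		return err
	}
	fmt.Printf("reliable nodes: %d online, %d offline\n", len(online), len(offline))
	return nil
}
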
@@ -580,8 +580,8 @@ func TestOverlayCache_Reliable(t *testing.T) {
 	})
 }
 
-func addNode(ctx context.Context, t *testing.T, cache overlay.DB, address, lastIPPort string, online, disqalified, auditSuspended, offlineSuspended, exited bool) uploadselection.SelectedNode {
-	selectedNode := uploadselection.SelectedNode{
+func addNode(ctx context.Context, t *testing.T, cache overlay.DB, address, lastIPPort string, online, disqalified, auditSuspended, offlineSuspended, exited bool) nodeselection.SelectedNode {
+	selectedNode := nodeselection.SelectedNode{
 		ID: testrand.NodeID(),
 		Address: &pb.NodeAddress{Address: address},
 		LastNet: lastIPPort,
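
Note: the addNode test helper above changes only the package of its return type. A matching sketch of constructing the renamed type directly; the constructor name is hypothetical and only fields visible in the hunk are set.

package example

import (
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/storj/satellite/nodeselection"
)

// newSelectedNode is a hypothetical constructor mirroring the struct literal in
// the addNode test helper above, now spelled with the renamed package.
func newSelectedNode(id storj.NodeID, address, lastIPPort string) nodeselection.SelectedNode {
	return nodeselection.SelectedNode{
		ID:      id,
		Address: &pb.NodeAddress{Address: address},
		LastNet: lastIPPort,
	}
}
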
@@ -16,7 +16,7 @@ import (
 	"storj.io/common/testrand"
 	"storj.io/storj/private/testplanet"
 	"storj.io/storj/satellite/metabase"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/storagenode"
 	"storj.io/storj/storagenode/orders"
 	"storj.io/storj/storagenode/orders/ordersfile"
@@ -41,7 +41,7 @@ func TestOrderDBSettle(t *testing.T) {
 		_, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits(
 			ctx,
 			metabase.BucketLocation{ProjectID: planet.Uplinks[0].Projects[0].ID, BucketName: bucketname},
-			[]*uploadselection.SelectedNode{
+			[]*nodeselection.SelectedNode{
 				{ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)},
 			},
 			time.Now().Add(2*time.Hour),
@@ -147,7 +147,7 @@ func TestOrderFileStoreAndDBSettle(t *testing.T) {
 		_, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits(
 			ctx,
 			metabase.BucketLocation{ProjectID: uplinkPeer.Projects[0].ID, BucketName: bucketname},
-			[]*uploadselection.SelectedNode{
+			[]*nodeselection.SelectedNode{
 				{ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)},
 			},
 			time.Now().Add(2*time.Hour),
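
Note: both orders tests hand CreatePutOrderLimits a slice of pointers whose element type simply moves packages. A minimal sketch of building that argument; the helper name is hypothetical and the field values mirror the hunks above.

package example

import (
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/storj/satellite/nodeselection"
)

// putOrderTargets is a hypothetical helper building the []*nodeselection.SelectedNode
// argument that both CreatePutOrderLimits calls above receive.
func putOrderTargets(id storj.NodeID) []*nodeselection.SelectedNode {
	return []*nodeselection.SelectedNode{
		{ID: id, LastIPPort: "fake", Address: new(pb.NodeAddress)},
	}
}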