satellite/overlay: optimize FindStorageNodes
Reduce the number of fields returned from the query.

Benchmark results in `satellite/overlay`:

benchstat before.txt after2.txt
name                                old time/op  new time/op  delta
SelectStorageNodes-32               7.85ms ± 1%  6.27ms ± 1%  -20.18%  (p=0.002 n=10+4)
SelectNewStorageNodes-32            8.21ms ± 1%  6.61ms ± 0%  -19.53%  (p=0.002 n=10+4)
SelectStorageNodesExclusion-32      17.2ms ± 1%  15.9ms ± 1%   -7.55%  (p=0.002 n=10+4)
SelectNewStorageNodesExclusion-32   17.8ms ± 2%  16.1ms ± 0%   -9.38%  (p=0.002 n=10+4)
FindStorageNodes-32                 48.4ms ± 1%  45.1ms ± 0%   -6.69%  (p=0.002 n=10+4)
FindStorageNodesExclusion-32        79.2ms ± 1%  76.1ms ± 1%   -3.89%  (p=0.002 n=10+4)

Benchmark results from `satellite/overlay` after making the benchmarks parallel:

benchstat before-parallel.txt after2-parallel.txt
name                                old time/op  new time/op  delta
SelectStorageNodes-32                548µs ± 1%   353µs ± 1%  -35.60%  (p=0.029 n=4+4)
SelectNewStorageNodes-32             562µs ± 0%   368µs ± 0%  -34.51%  (p=0.029 n=4+4)
SelectStorageNodesExclusion-32      1.02ms ± 1%  0.84ms ± 0%  -18.08%  (p=0.029 n=4+4)
SelectNewStorageNodesExclusion-32   1.03ms ± 1%  0.86ms ± 2%  -16.22%  (p=0.029 n=4+4)
FindStorageNodes-32                 3.11ms ± 0%  2.79ms ± 1%  -10.27%  (p=0.029 n=4+4)
FindStorageNodesExclusion-32        4.75ms ± 0%  4.43ms ± 1%   -6.56%  (p=0.029 n=4+4)

Change-Id: I1d85e2764eb270f4c2b1998303ccfc1179d65b26
commit cb781d66c7
parent 028ce16020
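The "parallel" tables above come from running the same selection benchmarks concurrently. A minimal sketch of that pattern, assuming a hypothetical selectNodes helper standing in for the real database call (the actual benchmarks run against a live satellite database):

    package overlay_test

    import "testing"

    // selectNodes is a hypothetical stand-in for one selection query,
    // e.g. cache.SelectStorageNodes(ctx, count, criteria).
    func selectNodes() {}

    func BenchmarkSelectStorageNodes(b *testing.B) {
        // Goroutines pull iterations from a shared counter, so the
        // reported time/op reflects concurrent selections.
        b.RunParallel(func(pb *testing.PB) {
            for pb.Next() {
                selectNodes()
            }
        })
    }

Each configuration is run enough times for benchstat to compute the ± bounds, then the two result files are compared with benchstat (e.g. benchstat before-parallel.txt after2-parallel.txt), which is where the tables above come from.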
@@ -183,8 +183,8 @@ func TestDisqualifiedNodesGetNoUpload(t *testing.T) {
 		assert.Len(t, nodes, 3)
 		for _, node := range nodes {
-			assert.False(t, isDisqualified(t, ctx, satellitePeer, node.Id))
-			assert.NotEqual(t, node.Id, disqualifiedNode)
+			assert.False(t, isDisqualified(t, ctx, satellitePeer, node.ID))
+			assert.NotEqual(t, node.ID, disqualifiedNode)
 		}

 	})
@@ -409,7 +409,7 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream processS
 	}

 	newNode := newNodes[0]
-	endpoint.log.Debug("found new node for piece transfer", zap.Stringer("original node ID", nodeID), zap.Stringer("replacement node ID", newNode.Id),
+	endpoint.log.Debug("found new node for piece transfer", zap.Stringer("original node ID", nodeID), zap.Stringer("replacement node ID", newNode.ID),
 		zap.ByteString("path", incomplete.Path), zap.Int32("piece num", incomplete.PieceNum))

 	pieceID := remote.RootPieceId.Derive(nodeID, incomplete.PieceNum)
@@ -420,7 +420,7 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream processS
 	}

 	bucketID := []byte(storj.JoinPaths(parts[0], parts[2]))
-	limit, privateKey, err := endpoint.orders.CreateGracefulExitPutOrderLimit(ctx, bucketID, newNode.Id, incomplete.PieceNum, remote.RootPieceId, int32(pieceSize))
+	limit, privateKey, err := endpoint.orders.CreateGracefulExitPutOrderLimit(ctx, bucketID, newNode.ID, incomplete.PieceNum, remote.RootPieceId, int32(pieceSize))
 	if err != nil {
 		return Error.Wrap(err)
 	}
@@ -384,7 +384,7 @@ func (service *Service) RandomSampleOfOrderLimits(limits []*pb.AddressedOrderLim
 }

 // CreatePutOrderLimits creates the order limits for uploading pieces to nodes.
-func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*overlay.NodeDossier, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*overlay.SelectedNode, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)

 	orderExpiration := time.Now().Add(service.orderExpiration)
@@ -408,8 +408,8 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byt
 			SatelliteId:      service.satellite.ID(),
 			SatelliteAddress: service.satelliteAddress,
 			UplinkPublicKey:  piecePublicKey,
-			StorageNodeId:    node.Id,
-			PieceId:          rootPieceID.Derive(node.Id, pieceNum),
+			StorageNodeId:    node.ID,
+			PieceId:          rootPieceID.Derive(node.ID, pieceNum),
 			Action:           pb.PieceAction_PUT,
 			Limit:            maxPieceSize,
 			PieceExpiration:  expiration,
@@ -835,7 +835,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 }

 // CreatePutRepairOrderLimits creates the order limits for uploading the repaired pieces of pointer to newNodes.
-func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*overlay.NodeDossier) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*overlay.SelectedNode) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
 	orderExpiration := time.Now().Add(service.orderExpiration)

@@ -895,8 +895,8 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID
 			SatelliteId:      service.satellite.ID(),
 			SatelliteAddress: service.satelliteAddress,
 			UplinkPublicKey:  piecePublicKey,
-			StorageNodeId:    node.Id,
-			PieceId:          rootPieceID.Derive(node.Id, pieceNum),
+			StorageNodeId:    node.ID,
+			PieceId:          rootPieceID.Derive(node.ID, pieceNum),
 			Action:           pb.PieceAction_PUT_REPAIR,
 			Limit:            pieceSize,
 			PieceExpiration:  pointer.ExpirationDate,
@@ -38,6 +38,7 @@ func TestMinimumDiskSpace(t *testing.T) {
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		node0 := planet.StorageNodes[0]
 		node0.Contact.Chore.Pause(ctx)
+
 		nodeDossier := node0.Local()
 		ident := node0.Identity
 		peer := rpcpeer.Peer{
@@ -360,7 +361,7 @@ func TestNodeSelectionGracefulExit(t *testing.T) {

 			// expect no exiting nodes in selection
 			for _, node := range response {
-				assert.False(t, exitingNodes[node.Id])
+				assert.False(t, exitingNodes[node.ID])
 			}
 		}
 	})
@@ -37,9 +37,9 @@ var ErrNotEnoughNodes = errs.Class("not enough nodes")
 // architecture: Database
 type DB interface {
 	// SelectStorageNodes looks up nodes based on criteria
-	SelectStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*NodeDossier, error)
+	SelectStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*SelectedNode, error)
 	// SelectNewStorageNodes looks up nodes based on new node criteria
-	SelectNewStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*NodeDossier, error)
+	SelectNewStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*SelectedNode, error)

 	// Get looks up the node by nodeID
 	Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
@@ -218,6 +218,14 @@ type NodeLastContact struct {
 	LastContactFailure time.Time
 }

+// SelectedNode is used as a result for creating order limits.
+type SelectedNode struct {
+	ID         storj.NodeID
+	Address    *pb.NodeAddress
+	LastNet    string
+	LastIPPort string
+}
+
 // Service is used to store and handle node information
 //
 // architecture: Service
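The four fields of SelectedNode correspond one-to-one to the four columns the slimmed-down queries below select (last_net, id, address, last_ip_port), which is what lets the cache skip loading a full dbx.Node and converting it with convertDBNode. An illustrative value (all data made up):

    node := &overlay.SelectedNode{
        ID:         storj.NodeID{},                                // id (zero ID, for illustration only)
        Address:    &pb.NodeAddress{Address: "203.0.113.1:28967"}, // address
        LastNet:    "203.0.113",                                   // last_net (network part; format illustrative)
        LastIPPort: "203.0.113.1:28967",                           // last_ip_port
    }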
@@ -268,13 +276,13 @@ func (service *Service) IsOnline(node *NodeDossier) bool {
 }

 // FindStorageNodes searches the overlay network for nodes that meet the provided requirements
-func (service *Service) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*NodeDossier, err error) {
+func (service *Service) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node)
 }

 // FindStorageNodesWithPreferences searches the overlay network for nodes that meet the provided criteria
-func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*NodeDossier, err error) {
+func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	// TODO: add sanity limits to requested node count
@@ -300,7 +308,7 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
 		newNodeCount = int(float64(reputableNodeCount) * preferences.NewNodeFraction)
 	}

-	var newNodes []*NodeDossier
+	var newNodes []*SelectedNode
 	if newNodeCount > 0 {
 		newNodes, err = service.db.SelectNewStorageNodes(ctx, newNodeCount, &NodeCriteria{
 			FreeDisk: preferences.MinimumDiskSpace.Int64(),
@@ -318,7 +326,7 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req

 	// add selected new nodes ID and network to the excluded lists for reputable node selection
 	for _, newNode := range newNodes {
-		excludedIDs = append(excludedIDs, newNode.Id)
+		excludedIDs = append(excludedIDs, newNode.ID)
 		if preferences.DistinctIP {
 			excludedNetworks = append(excludedNetworks, newNode.LastNet)
 		}
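Pulling the two hunks above together: the service first selects new nodes, then excludes what it just picked so the reputable pass cannot return the same node (or, with DistinctIP, the same network). A condensed, hedged sketch of the flow (error handling and most NodeCriteria fields omitted):

    // new-node pass
    newNodes, _ := service.db.SelectNewStorageNodes(ctx, newNodeCount, criteria)
    // exclude the freshly picked nodes from the reputable pass
    for _, n := range newNodes {
        excludedIDs = append(excludedIDs, n.ID)
        if preferences.DistinctIP {
            excludedNetworks = append(excludedNetworks, n.LastNet)
        }
    }
    // reputable pass; criteria carries ExcludedIDs / ExcludedNetworks
    reputableNodes, _ := service.db.SelectStorageNodes(ctx, reputableNodeCount, criteria)
    nodes = append(newNodes, reputableNodes...)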
@@ -186,7 +186,7 @@ func TestRandomizedSelection(t *testing.T) {

 			// select numNodesToSelect nodes selectIterations times
 			for i := 0; i < selectIterations; i++ {
-				var nodes []*overlay.NodeDossier
+				var nodes []*overlay.SelectedNode
 				var err error

 				if i%2 == 0 {
@@ -205,7 +205,7 @@ func TestRandomizedSelection(t *testing.T) {
 				require.Len(t, nodes, numNodesToSelect)

 				for _, node := range nodes {
-					nodeCounts[node.Id]++
+					nodeCounts[node.ID]++
 				}
 			}

@@ -629,7 +629,7 @@ func TestSuspendedSelection(t *testing.T) {
 			}
 		}

-		var nodes []*overlay.NodeDossier
+		var nodes []*overlay.SelectedNode
 		var err error

 		numNodesToSelect := 10
@@ -642,7 +642,7 @@ func TestSuspendedSelection(t *testing.T) {
 		require.NoError(t, err)
 		require.Len(t, nodes, 3)
 		for _, node := range nodes {
-			require.False(t, suspendedIDs[node.Id])
+			require.False(t, suspendedIDs[node.ID])
 		}

 		// select 10 new nodes - 5 new, 2 suspended, so expect 3
@@ -653,7 +653,7 @@ func TestSuspendedSelection(t *testing.T) {
 		require.NoError(t, err)
 		require.Len(t, nodes, 3)
 		for _, node := range nodes {
-			require.False(t, suspendedIDs[node.Id])
+			require.False(t, suspendedIDs[node.ID])
 		}
 	})
 }
@@ -33,7 +33,7 @@ type overlaycache struct {
 	db *satelliteDB
 }

-func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*overlay.NodeDossier, err error) {
+func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*overlay.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	nodeType := int(pb.NodeType_STORAGE)
@@ -77,7 +77,7 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, cr
 		}
 		for _, n := range moreNodes {
 			nodes = append(nodes, n)
-			criteria.ExcludedIDs = append(criteria.ExcludedIDs, n.Id)
+			criteria.ExcludedIDs = append(criteria.ExcludedIDs, n.ID)
 			criteria.ExcludedNetworks = append(criteria.ExcludedNetworks, n.LastNet)
 		}
 		if len(nodes) == count {
@@ -88,7 +88,7 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, cr
 	return nodes, nil
 }

-func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*overlay.NodeDossier, err error) {
+func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*overlay.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	nodeType := int(pb.NodeType_STORAGE)
@@ -130,7 +130,7 @@ func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int,
 		}
 		for _, n := range moreNodes {
 			nodes = append(nodes, n)
-			criteria.ExcludedIDs = append(criteria.ExcludedIDs, n.Id)
+			criteria.ExcludedIDs = append(criteria.ExcludedIDs, n.ID)
 			criteria.ExcludedNetworks = append(criteria.ExcludedNetworks, n.LastNet)
 		}
 		if len(nodes) == count {
@@ -167,7 +167,7 @@ func (cache *overlaycache) GetNodesNetwork(ctx context.Context, nodeIDs []storj.
 	return nodeNets, Error.Wrap(rows.Err())
 }

-func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj.NodeID, count int, safeQuery string, args ...interface{}) (_ []*overlay.NodeDossier, err error) {
+func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj.NodeID, count int, safeQuery string, args ...interface{}) (_ []*overlay.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	if count == 0 {
@@ -185,10 +185,8 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
 	args = append(args, count)

 	var rows *sql.Rows
-	rows, err = cache.db.Query(ctx, cache.db.Rebind(`SELECT id, type, address, last_net, last_ip_port,
-		free_disk, total_audit_count, audit_success_count,
-		total_uptime_count, uptime_success_count, disqualified, suspended,
-		audit_reputation_alpha, audit_reputation_beta
+	rows, err = cache.db.Query(ctx, cache.db.Rebind(`
+		SELECT last_net, id, address, last_ip_port
 		FROM nodes
 		`+safeQuery+safeExcludeNodes+`
 		ORDER BY RANDOM()
@@ -199,29 +197,23 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
 	}
 	defer func() { err = errs.Combine(err, rows.Close()) }()

-	var nodes []*overlay.NodeDossier
+	var nodes []*overlay.SelectedNode
 	for rows.Next() {
-		dbNode := &dbx.Node{}
-		err = rows.Scan(&dbNode.Id, &dbNode.Type, &dbNode.Address, &dbNode.LastNet, &dbNode.LastIpPort,
-			&dbNode.FreeDisk, &dbNode.TotalAuditCount, &dbNode.AuditSuccessCount,
-			&dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount, &dbNode.Disqualified, &dbNode.Suspended,
-			&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
-		)
+		var node overlay.SelectedNode
+		node.Address = &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC}
+
+		err = rows.Scan(&node.LastNet, &node.ID, &node.Address.Address, &node.LastIPPort)
 		if err != nil {
 			return nil, err
 		}

-		dossier, err := convertDBNode(ctx, dbNode)
-		if err != nil {
-			return nil, err
-		}
-		nodes = append(nodes, dossier)
+		nodes = append(nodes, &node)
 	}

 	return nodes, Error.Wrap(rows.Err())
 }

-func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedIDs []storj.NodeID, excludedNodeNetworks []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*overlay.NodeDossier, err error) {
+func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedIDs []storj.NodeID, excludedNodeNetworks []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*overlay.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	if count == 0 {
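One detail in the new scan loop worth noting: node.Address is allocated before rows.Scan because one of the scan targets is &node.Address.Address, and with a nil *pb.NodeAddress that dereference would panic. The pattern in isolation:

    var node overlay.SelectedNode
    // Allocate Address first so &node.Address.Address is a valid scan target.
    node.Address = &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC}
    err = rows.Scan(&node.LastNet, &node.ID, &node.Address.Address, &node.LastIPPort)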
@@ -249,9 +241,7 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedIDs [
 		SELECT *
 		FROM (
 			SELECT DISTINCT ON (last_net) last_net, -- choose at most 1 node from this network
-				id, type, address, last_ip_port, free_disk, total_audit_count,
-				audit_success_count, total_uptime_count, uptime_success_count,
-				audit_reputation_alpha, audit_reputation_beta
+				id, address, last_ip_port
 			FROM nodes
 			`+safeQuery+safeExcludeNodes+safeExcludeNetworks+`
 			AND last_net <> '' -- select nodes with a network set
@@ -264,22 +254,17 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedIDs [
 	}
 	defer func() { err = errs.Combine(err, rows.Close()) }()

-	var nodes []*overlay.NodeDossier
+	var nodes []*overlay.SelectedNode
 	for rows.Next() {
-		dbNode := &dbx.Node{}
-		err = rows.Scan(&dbNode.LastNet,
-			&dbNode.Id, &dbNode.Type, &dbNode.Address, &dbNode.LastIpPort, &dbNode.FreeDisk, &dbNode.TotalAuditCount,
-			&dbNode.AuditSuccessCount, &dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount,
-			&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
-		)
+		var node overlay.SelectedNode
+		node.Address = &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC}
+
+		err = rows.Scan(&node.LastNet, &node.ID, &node.Address.Address, &node.LastIPPort)
 		if err != nil {
 			return nil, err
 		}
-		dossier, err := convertDBNode(ctx, dbNode)
-		if err != nil {
-			return nil, err
-		}
-		nodes = append(nodes, dossier)
+
+		nodes = append(nodes, &node)
 	}

 	return nodes, Error.Wrap(rows.Err())
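End to end, callers of the overlay service now receive the lighter type. A hedged usage sketch (the RequestedCount field is assumed from the surrounding FindStorageNodesRequest type and may differ):

    nodes, err := service.FindStorageNodes(ctx, overlay.FindStorageNodesRequest{
        RequestedCount: 10, // assumed field name
    })
    if err != nil {
        return err
    }
    for _, node := range nodes {
        // Only ID, Address, LastNet, and LastIPPort are available here;
        // reputation details still require a separate overlay DB Get(ctx, node.ID).
        _ = node
    }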