// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"database/sql"
	"strings"
	"time"

	"github.com/lib/pq"
	sqlite3 "github.com/mattn/go-sqlite3"
	"github.com/zeebo/errs"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/internal/version"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	dbx "storj.io/storj/satellite/satellitedb/dbx"
	"storj.io/storj/storage"
)

var (
	mon = monkit.Package()
)

var _ overlay.DB = (*overlaycache)(nil)
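
// overlaycache implements overlay.DB on top of the dbx-generated satellite database.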
type overlaycache struct {
	db *dbx.DB
}
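
// SelectStorageNodes looks up storage nodes that match the given criteria;
// when criteria.DistinctIP is set, it returns at most one node per network.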
func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	nodeType := int(pb.NodeType_STORAGE)

	safeQuery := `
		WHERE disqualified IS NULL
		AND type = ?
		AND free_bandwidth >= ?
		AND free_disk >= ?
		AND total_audit_count >= ?
		AND total_uptime_count >= ?
		AND (last_contact_success > ?
		OR last_contact_success > last_contact_failure)`
	args := append(make([]interface{}, 0, 13),
		nodeType, criteria.FreeBandwidth, criteria.FreeDisk, criteria.AuditCount,
		criteria.UptimeCount, time.Now().Add(-criteria.OnlineWindow))

	if criteria.MinimumVersion != "" {
		v, err := version.NewSemVer(criteria.MinimumVersion)
		if err != nil {
			return nil, Error.New("invalid node selection criteria version: %v", err)
		}
		safeQuery += `
			AND (major > ? OR (major = ? AND (minor > ? OR (minor = ? AND patch >= ?))))
			AND release`
		args = append(args, v.Major, v.Major, v.Minor, v.Minor, v.Patch)
	}

	if !criteria.DistinctIP {
		nodes, err = cache.queryNodes(ctx, criteria.ExcludedNodes, count, safeQuery, args...)
		if err != nil {
			return nil, err
		}
		return nodes, nil
	}

	// query for distinct IPs: retry a few times, excluding already chosen
	// nodes and their networks, until enough nodes have been found
	for i := 0; i < 3; i++ {
		moreNodes, err := cache.queryNodesDistinct(ctx, criteria.ExcludedNodes, criteria.ExcludedIPs, count-len(nodes), safeQuery, criteria.DistinctIP, args...)
		if err != nil {
			return nil, err
		}
		for _, n := range moreNodes {
			nodes = append(nodes, n)
			criteria.ExcludedNodes = append(criteria.ExcludedNodes, n.Id)
			criteria.ExcludedIPs = append(criteria.ExcludedIPs, n.LastIp)
		}
		if len(nodes) == count {
			break
		}
	}

	return nodes, nil
}
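
// SelectNewStorageNodes looks up storage nodes that are still below the audit
// or uptime vetting thresholds in the given criteria.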
func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	nodeType := int(pb.NodeType_STORAGE)

	safeQuery := `
		WHERE disqualified IS NULL
		AND type = ?
		AND free_bandwidth >= ?
		AND free_disk >= ?
		AND (total_audit_count < ? OR total_uptime_count < ?)
		AND (last_contact_success > ?
		OR last_contact_success > last_contact_failure)`
	args := append(make([]interface{}, 0, 10),
		nodeType, criteria.FreeBandwidth, criteria.FreeDisk, criteria.AuditCount, criteria.UptimeCount, time.Now().Add(-criteria.OnlineWindow))

	if criteria.MinimumVersion != "" {
		v, err := version.NewSemVer(criteria.MinimumVersion)
		if err != nil {
			return nil, Error.New("invalid node selection criteria version: %v", err)
		}
		safeQuery += `
			AND (major > ? OR (major = ? AND (minor > ? OR (minor = ? AND patch >= ?))))
			AND release`
		args = append(args, v.Major, v.Major, v.Minor, v.Minor, v.Patch)
	}

	if !criteria.DistinctIP {
		nodes, err = cache.queryNodes(ctx, criteria.ExcludedNodes, count, safeQuery, args...)
		if err != nil {
			return nil, err
		}
		return nodes, nil
	}

	// query for distinct IPs: same retry strategy as SelectStorageNodes
	for i := 0; i < 3; i++ {
		moreNodes, err := cache.queryNodesDistinct(ctx, criteria.ExcludedNodes, criteria.ExcludedIPs, count-len(nodes), safeQuery, criteria.DistinctIP, args...)
		if err != nil {
			return nil, err
		}
		for _, n := range moreNodes {
			nodes = append(nodes, n)
			criteria.ExcludedNodes = append(criteria.ExcludedNodes, n.Id)
			criteria.ExcludedIPs = append(criteria.ExcludedIPs, n.LastIp)
		}
		if len(nodes) == count {
			break
		}
	}

	return nodes, nil
}
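
// queryNodes returns up to count random nodes matching safeQuery, skipping any
// node whose ID is in excludedNodes.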
func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj.NodeID, count int, safeQuery string, args ...interface{}) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	if count == 0 {
		return nil, nil
	}

	safeExcludeNodes := ""
	if len(excludedNodes) > 0 {
		safeExcludeNodes = ` AND id NOT IN (?` + strings.Repeat(", ?", len(excludedNodes)-1) + `)`
		for _, id := range excludedNodes {
			args = append(args, id.Bytes())
		}
	}

	args = append(args, count)

	var rows *sql.Rows
	rows, err = cache.db.Query(cache.db.Rebind(`SELECT id, type, address, last_net,
		free_bandwidth, free_disk, total_audit_count, audit_success_count,
		total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha,
		audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta
		FROM nodes
		`+safeQuery+safeExcludeNodes+`
		ORDER BY RANDOM()
		LIMIT ?`), args...)

	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()
	var nodes []*pb.Node
	for rows.Next() {
		dbNode := &dbx.Node{}
		err = rows.Scan(&dbNode.Id, &dbNode.Type,
			&dbNode.Address, &dbNode.LastNet, &dbNode.FreeBandwidth, &dbNode.FreeDisk,
			&dbNode.TotalAuditCount, &dbNode.AuditSuccessCount,
			&dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount, &dbNode.Disqualified,
			&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
			&dbNode.UptimeReputationAlpha, &dbNode.UptimeReputationBeta,
		)
		if err != nil {
			return nil, err
		}

		dossier, err := convertDBNode(ctx, dbNode)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, &dossier.Node)
	}

	return nodes, rows.Err()
}
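
// queryNodesDistinct dispatches the distinct-network node query to the
// implementation matching the underlying database driver.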
func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	switch t := cache.db.DB.Driver().(type) {
	case *sqlite3.SQLiteDriver:
		return cache.sqliteQueryNodesDistinct(ctx, excludedNodes, excludedIPs, count, safeQuery, distinctIP, args...)
	case *pq.Driver:
		return cache.postgresQueryNodesDistinct(ctx, excludedNodes, excludedIPs, count, safeQuery, distinctIP, args...)
	default:
		return []*pb.Node{}, Error.New("Unsupported database %T", t)
	}
}
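
// sqliteQueryNodesDistinct returns up to count random nodes matching safeQuery,
// selecting at most one node per last_net (SQLite implementation).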
func (cache *overlaycache) sqliteQueryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	if count == 0 {
		return nil, nil
	}

	safeExcludeNodes := ""
	if len(excludedNodes) > 0 {
		safeExcludeNodes = ` AND id NOT IN (?` + strings.Repeat(", ?", len(excludedNodes)-1) + `)`
		for _, id := range excludedNodes {
			args = append(args, id.Bytes())
		}
	}

	safeExcludeIPs := ""
	if len(excludedIPs) > 0 {
		safeExcludeIPs = ` AND last_net NOT IN (?` + strings.Repeat(", ?", len(excludedIPs)-1) + `)`
		for _, ip := range excludedIPs {
			args = append(args, ip)
		}
	}

	args = append(args, count)

	rows, err := cache.db.Query(cache.db.Rebind(`SELECT id, type, address, last_net,
		free_bandwidth, free_disk, total_audit_count, audit_success_count,
		total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha,
		audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta
		FROM (SELECT *, Row_number() OVER(PARTITION BY last_net ORDER BY RANDOM()) rn
		FROM nodes
		`+safeQuery+safeExcludeNodes+safeExcludeIPs+`) n
		WHERE rn = 1
		ORDER BY RANDOM()
		LIMIT ?`), args...)

	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()
	var nodes []*pb.Node
	for rows.Next() {
		dbNode := &dbx.Node{}
		err = rows.Scan(&dbNode.Id, &dbNode.Type,
			&dbNode.Address, &dbNode.LastNet, &dbNode.FreeBandwidth, &dbNode.FreeDisk,
			&dbNode.TotalAuditCount, &dbNode.AuditSuccessCount,
			&dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount, &dbNode.Disqualified,
			&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
			&dbNode.UptimeReputationAlpha, &dbNode.UptimeReputationBeta,
		)
		if err != nil {
			return nil, err
		}

		dossier, err := convertDBNode(ctx, dbNode)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, &dossier.Node)
	}

	return nodes, rows.Err()
}
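
// postgresQueryNodesDistinct returns up to count random nodes matching safeQuery,
// selecting at most one node per last_net (PostgreSQL implementation).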
func (cache *overlaycache) postgresQueryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	if count == 0 {
		return nil, nil
	}

	safeExcludeNodes := ""
	if len(excludedNodes) > 0 {
		safeExcludeNodes = ` AND id NOT IN (?` + strings.Repeat(", ?", len(excludedNodes)-1) + `)`
		for _, id := range excludedNodes {
			args = append(args, id.Bytes())
		}
	}

	safeExcludeIPs := ""
	if len(excludedIPs) > 0 {
		safeExcludeIPs = ` AND last_net NOT IN (?` + strings.Repeat(", ?", len(excludedIPs)-1) + `)`
		for _, ip := range excludedIPs {
			args = append(args, ip)
		}
	}
	args = append(args, count)

	rows, err := cache.db.Query(cache.db.Rebind(`
		WITH candidates AS (
			SELECT * FROM nodes
			`+safeQuery+safeExcludeNodes+safeExcludeIPs+`
		)
		SELECT
			id, type, address, last_net, free_bandwidth, free_disk, total_audit_count,
			audit_success_count, total_uptime_count, uptime_success_count,
			audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha,
			uptime_reputation_beta
		FROM (
			SELECT DISTINCT ON (last_net) * -- choose at max 1 node from this IP or network
			FROM candidates
			WHERE last_net <> '' -- don't try to IP-filter nodes with no known IP yet
			ORDER BY last_net, RANDOM() -- equal chance of choosing any qualified node at this IP or network
		) filteredcandidates
		ORDER BY RANDOM() -- do the actual node selection from filtered pool
		LIMIT ?`), args...)

	if err != nil {
		return nil, err
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()
	var nodes []*pb.Node
	for rows.Next() {
		dbNode := &dbx.Node{}
		err = rows.Scan(&dbNode.Id, &dbNode.Type,
			&dbNode.Address, &dbNode.LastNet, &dbNode.FreeBandwidth, &dbNode.FreeDisk,
			&dbNode.TotalAuditCount, &dbNode.AuditSuccessCount,
			&dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount,
			&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
			&dbNode.UptimeReputationAlpha, &dbNode.UptimeReputationBeta,
		)
		if err != nil {
			return nil, err
		}
		dossier, err := convertDBNode(ctx, dbNode)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, &dossier.Node)
	}

	return nodes, rows.Err()
}

// Get looks up the node by nodeID
func (cache *overlaycache) Get(ctx context.Context, id storj.NodeID) (_ *overlay.NodeDossier, err error) {
	defer mon.Task()(&ctx)(&err)

	if id.IsZero() {
		return nil, overlay.ErrEmptyNode
	}

	node, err := cache.db.Get_Node_By_Id(ctx, dbx.Node_Id(id.Bytes()))
	if err == sql.ErrNoRows {
		return nil, overlay.ErrNodeNotFound.New(id.String())
	}
	if err != nil {
		return nil, err
	}

	return convertDBNode(ctx, node)
}

// IsVetted returns whether or not the node reaches reputable thresholds
func (cache *overlaycache) IsVetted(ctx context.Context, id storj.NodeID, criteria *overlay.NodeCriteria) (_ bool, err error) {
	defer mon.Task()(&ctx)(&err)

	row := cache.db.QueryRow(cache.db.Rebind(`SELECT id
		FROM nodes
		WHERE id = ?
		AND disqualified IS NULL
		AND type = ?
		AND total_audit_count >= ?
		AND total_uptime_count >= ?
		`), id, pb.NodeType_STORAGE, criteria.AuditCount, criteria.UptimeCount)
	var bytes *[]byte
	err = row.Scan(&bytes)
	if err != nil {
		if err == sql.ErrNoRows {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// KnownOffline filters a set of nodes to offline nodes
func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIds storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(nodeIds) == 0 {
		return nil, Error.New("no ids provided")
	}

	// get offline nodes
	var rows *sql.Rows
	switch t := cache.db.Driver().(type) {
	case *sqlite3.SQLiteDriver:
		args := make([]interface{}, 0, len(nodeIds)+1)
		for i := range nodeIds {
			args = append(args, nodeIds[i].Bytes())
		}
		args = append(args, time.Now().Add(-criteria.OnlineWindow))

		rows, err = cache.db.Query(cache.db.Rebind(`
			SELECT id FROM nodes
			WHERE id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`)
			AND (
				last_contact_success < last_contact_failure AND last_contact_success < ?
			)
		`), args...)

	case *pq.Driver:
		rows, err = cache.db.Query(`
			SELECT id FROM nodes
			WHERE id = any($1::bytea[])
			AND (
				last_contact_success < last_contact_failure AND last_contact_success < $2
			)
		`, postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow),
		)
	default:
		return nil, Error.New("Unsupported database %T", t)
	}

	if err != nil {
		return nil, err
	}
	defer func() {
		err = errs.Combine(err, rows.Close())
	}()

	for rows.Next() {
		var id storj.NodeID
		err = rows.Scan(&id)
		if err != nil {
			return nil, err
		}
		offlineNodes = append(offlineNodes, id)
	}
	return offlineNodes, nil
}

// KnownUnreliableOrOffline filters a set of nodes to unreliable or offline nodes, independent of whether they are new
func (cache *overlaycache) KnownUnreliableOrOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIds storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(nodeIds) == 0 {
		return nil, Error.New("no ids provided")
	}

	// get reliable and online nodes
	var rows *sql.Rows
	switch t := cache.db.Driver().(type) {
	case *sqlite3.SQLiteDriver:
		args := make([]interface{}, 0, len(nodeIds)+3)
		for i := range nodeIds {
			args = append(args, nodeIds[i].Bytes())
		}
		args = append(args, time.Now().Add(-criteria.OnlineWindow))

		rows, err = cache.db.Query(cache.db.Rebind(`
			SELECT id FROM nodes
			WHERE id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`)
			AND disqualified IS NULL
			AND (last_contact_success > ? OR last_contact_success > last_contact_failure)
		`), args...)

	case *pq.Driver:
		rows, err = cache.db.Query(`
			SELECT id FROM nodes
			WHERE id = any($1::bytea[])
			AND disqualified IS NULL
			AND (last_contact_success > $2 OR last_contact_success > last_contact_failure)
		`, postgresNodeIDList(nodeIds), time.Now().Add(-criteria.OnlineWindow),
		)
	default:
		return nil, Error.New("Unsupported database %T", t)
	}

	if err != nil {
		return nil, err
	}
	defer func() {
		err = errs.Combine(err, rows.Close())
	}()

	// any ID not returned by the query is either unreliable or offline
	goodNodes := make(map[storj.NodeID]struct{}, len(nodeIds))
	for rows.Next() {
		var id storj.NodeID
		err = rows.Scan(&id)
		if err != nil {
			return nil, err
		}
		goodNodes[id] = struct{}{}
	}
	for _, id := range nodeIds {
		if _, ok := goodNodes[id]; !ok {
			badNodes = append(badNodes, id)
		}
	}
	return badNodes, nil
}

// Reliable returns all reliable nodes.
func (cache *overlaycache) Reliable(ctx context.Context, criteria *overlay.NodeCriteria) (nodes storj.NodeIDList, err error) {
	// get reliable and online nodes
	rows, err := cache.db.Query(cache.db.Rebind(`
		SELECT id FROM nodes
		WHERE disqualified IS NULL
		AND (last_contact_success > ? OR last_contact_success > last_contact_failure)`),
		time.Now().Add(-criteria.OnlineWindow))
	if err != nil {
		return nil, err
	}
	defer func() {
		err = errs.Combine(err, rows.Close())
	}()

	for rows.Next() {
		var id storj.NodeID
		err = rows.Scan(&id)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, id)
	}
	return nodes, nil
}

// Paginate will run through the nodes table, returning up to limit nodes
// starting at the given offset and whether more rows remain.
func (cache *overlaycache) Paginate(ctx context.Context, offset int64, limit int) (_ []*overlay.NodeDossier, _ bool, err error) {
	defer mon.Task()(&ctx)(&err)

	cursor := storj.NodeID{}

	// more represents end of table. If there are more rows in the database, more will be true.
	more := true

	if limit <= 0 || limit > storage.LookupLimit {
		limit = storage.LookupLimit
	}

	dbxInfos, err := cache.db.Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx, dbx.Node_Id(cursor.Bytes()), limit, offset)
	if err != nil {
		return nil, false, err
	}

	if len(dbxInfos) < limit {
		more = false
	}

	infos := make([]*overlay.NodeDossier, len(dbxInfos))
	for i, dbxInfo := range dbxInfos {
		infos[i], err = convertDBNode(ctx, dbxInfo)
		if err != nil {
			return nil, false, err
		}
	}
	return infos, more, nil
}

// PaginateQualified will retrieve all qualified nodes
func (cache *overlaycache) PaginateQualified(ctx context.Context, offset int64, limit int) (_ []*pb.Node, _ bool, err error) {
	defer mon.Task()(&ctx)(&err)

	cursor := storj.NodeID{}

	// more represents end of table. If there are more rows in the database, more will be true.
	more := true

	if limit <= 0 || limit > storage.LookupLimit {
		limit = storage.LookupLimit
	}

	dbxInfos, err := cache.db.Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx, dbx.Node_Id(cursor.Bytes()), limit, offset)
	if err != nil {
		return nil, false, err
	}
	if len(dbxInfos) < limit {
		more = false
	}

	infos := make([]*pb.Node, len(dbxInfos))
	for i, dbxInfo := range dbxInfos {
		infos[i], err = convertDBNodeToPBNode(ctx, dbxInfo)
		if err != nil {
			return nil, false, err
		}
	}
	return infos, more, nil
}

// UpdateAddress updates node address
func (cache *overlaycache) UpdateAddress(ctx context.Context, info *pb.Node, defaults overlay.NodeSelectionConfig) (err error) {
	defer mon.Task()(&ctx)(&err)

	if info == nil || info.Id.IsZero() {
		return overlay.ErrEmptyNode
	}

	tx, err := cache.db.Open(ctx)
	if err != nil {
		return Error.Wrap(err)
	}
	// TODO: use upsert
	_, err = tx.Get_Node_By_Id(ctx, dbx.Node_Id(info.Id.Bytes()))

	address := info.Address
	if address == nil {
		address = &pb.NodeAddress{}
	}

	if err != nil {
		// add the node to DB for first time
		_, err = tx.Create_Node(
			ctx,
			dbx.Node_Id(info.Id.Bytes()),
			dbx.Node_Address(address.Address),
			dbx.Node_LastNet(info.LastIp),
			dbx.Node_Protocol(int(address.Transport)),
			dbx.Node_Type(int(pb.NodeType_INVALID)),
			dbx.Node_Email(""),
			dbx.Node_Wallet(""),
			dbx.Node_FreeBandwidth(-1),
			dbx.Node_FreeDisk(-1),
			dbx.Node_Major(0),
			dbx.Node_Minor(0),
			dbx.Node_Patch(0),
			dbx.Node_Hash(""),
			dbx.Node_Timestamp(time.Time{}),
			dbx.Node_Release(false),
			dbx.Node_Latency90(0),
			dbx.Node_AuditSuccessCount(0),
			dbx.Node_TotalAuditCount(0),
			dbx.Node_UptimeSuccessCount(0),
			dbx.Node_TotalUptimeCount(0),
			dbx.Node_LastContactSuccess(time.Now()),
			dbx.Node_LastContactFailure(time.Time{}),
			dbx.Node_Contained(false),
			dbx.Node_AuditReputationAlpha(defaults.AuditReputationAlpha0),
			dbx.Node_AuditReputationBeta(defaults.AuditReputationBeta0),
			dbx.Node_UptimeReputationAlpha(defaults.UptimeReputationAlpha0),
			dbx.Node_UptimeReputationBeta(defaults.UptimeReputationBeta0),
			dbx.Node_Create_Fields{
				Disqualified: dbx.Node_Disqualified_Null(),
			},
		)
		if err != nil {
			return Error.Wrap(errs.Combine(err, tx.Rollback()))
		}
	} else {
		update := dbx.Node_Update_Fields{
			Address:  dbx.Node_Address(address.Address),
			LastNet:  dbx.Node_LastNet(info.LastIp),
			Protocol: dbx.Node_Protocol(int(address.Transport)),
		}

		_, err := tx.Update_Node_By_Id(ctx, dbx.Node_Id(info.Id.Bytes()), update)
		if err != nil {
			return Error.Wrap(errs.Combine(err, tx.Rollback()))
		}
	}

	return Error.Wrap(tx.Commit())
}

// UpdateStats updates a single storagenode's stats in the db
func (cache *overlaycache) UpdateStats(ctx context.Context, updateReq *overlay.UpdateRequest) (stats *overlay.NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)

	nodeID := updateReq.NodeID

	tx, err := cache.db.Open(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	dbNode, err := tx.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
	if err != nil {
		return nil, Error.Wrap(errs.Combine(err, tx.Rollback()))
	}
	// do not update reputation if node is disqualified
	if dbNode.Disqualified != nil {
		return getNodeStats(dbNode), Error.Wrap(tx.Commit())
	}

	auditAlpha, auditBeta, totalAuditCount := updateReputation(
		updateReq.AuditSuccess,
		dbNode.AuditReputationAlpha,
		dbNode.AuditReputationBeta,
		updateReq.AuditLambda,
		updateReq.AuditWeight,
		dbNode.TotalAuditCount,
	)
	mon.FloatVal("audit_reputation_alpha").Observe(auditAlpha)
	mon.FloatVal("audit_reputation_beta").Observe(auditBeta)

	uptimeAlpha, uptimeBeta, totalUptimeCount := updateReputation(
		updateReq.IsUp,
		dbNode.UptimeReputationAlpha,
		dbNode.UptimeReputationBeta,
		updateReq.UptimeLambda,
		updateReq.UptimeWeight,
		dbNode.TotalUptimeCount,
	)
	mon.FloatVal("uptime_reputation_alpha").Observe(uptimeAlpha)
	mon.FloatVal("uptime_reputation_beta").Observe(uptimeBeta)

	updateFields := dbx.Node_Update_Fields{
		TotalAuditCount:       dbx.Node_TotalAuditCount(totalAuditCount),
		TotalUptimeCount:      dbx.Node_TotalUptimeCount(totalUptimeCount),
		AuditReputationAlpha:  dbx.Node_AuditReputationAlpha(auditAlpha),
		AuditReputationBeta:   dbx.Node_AuditReputationBeta(auditBeta),
		UptimeReputationAlpha: dbx.Node_UptimeReputationAlpha(uptimeAlpha),
		UptimeReputationBeta:  dbx.Node_UptimeReputationBeta(uptimeBeta),
	}

	auditRep := auditAlpha / (auditAlpha + auditBeta)
	if auditRep <= updateReq.AuditDQ {
		updateFields.Disqualified = dbx.Node_Disqualified(time.Now().UTC())
	}

	uptimeRep := uptimeAlpha / (uptimeAlpha + uptimeBeta)
	if uptimeRep <= updateReq.UptimeDQ {
		// n.b. that this will overwrite the audit DQ timestamp
		// if it has already been set.
		updateFields.Disqualified = dbx.Node_Disqualified(time.Now().UTC())
	}

	if updateReq.IsUp {
		updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(dbNode.UptimeSuccessCount + 1)
		updateFields.LastContactSuccess = dbx.Node_LastContactSuccess(time.Now())
	} else {
		updateFields.LastContactFailure = dbx.Node_LastContactFailure(time.Now())
	}

	if updateReq.AuditSuccess {
		updateFields.AuditSuccessCount = dbx.Node_AuditSuccessCount(dbNode.AuditSuccessCount + 1)
	}

	// Updating node stats always exits it from containment mode
	updateFields.Contained = dbx.Node_Contained(false)

	dbNode, err = tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
	if err != nil {
		return nil, Error.Wrap(errs.Combine(err, tx.Rollback()))
	}

	// Cleanup containment table too
	_, err = tx.Delete_PendingAudits_By_NodeId(ctx, dbx.PendingAudits_NodeId(nodeID.Bytes()))
	if err != nil {
		return nil, Error.Wrap(errs.Combine(err, tx.Rollback()))
	}

	// TODO: Allegedly tx.Get_Node_By_Id and tx.Update_Node_By_Id should never return a nil value for dbNode,
	// however we've seen from some crashes that it does. We need to track down the cause of these crashes
	// but for now we're adding a nil check to prevent a panic.
	if dbNode == nil {
		return nil, Error.Wrap(errs.Combine(errs.New("unable to get node by ID: %v", nodeID), tx.Rollback()))
	}

	return getNodeStats(dbNode), Error.Wrap(tx.Commit())
}

// UpdateNodeInfo updates the email and wallet for a given node ID for satellite payments.
func (cache *overlaycache) UpdateNodeInfo(ctx context.Context, nodeID storj.NodeID, nodeInfo *pb.InfoResponse) (stats *overlay.NodeDossier, err error) {
	defer mon.Task()(&ctx)(&err)

	var updateFields dbx.Node_Update_Fields
	if nodeInfo != nil {
		if nodeInfo.GetType() != pb.NodeType_INVALID {
			updateFields.Type = dbx.Node_Type(int(nodeInfo.GetType()))
		}
		if nodeInfo.GetOperator() != nil {
			updateFields.Wallet = dbx.Node_Wallet(nodeInfo.GetOperator().GetWallet())
			updateFields.Email = dbx.Node_Email(nodeInfo.GetOperator().GetEmail())
		}
		if nodeInfo.GetCapacity() != nil {
			updateFields.FreeDisk = dbx.Node_FreeDisk(nodeInfo.GetCapacity().GetFreeDisk())
			updateFields.FreeBandwidth = dbx.Node_FreeBandwidth(nodeInfo.GetCapacity().GetFreeBandwidth())
		}
		if nodeInfo.GetVersion() != nil {
			semVer, err := version.NewSemVer(nodeInfo.GetVersion().GetVersion())
			if err != nil {
				return nil, errs.New("unable to convert version to semVer")
			}
			updateFields.Major = dbx.Node_Major(semVer.Major)
			updateFields.Minor = dbx.Node_Minor(semVer.Minor)
			updateFields.Patch = dbx.Node_Patch(semVer.Patch)
			updateFields.Hash = dbx.Node_Hash(nodeInfo.GetVersion().GetCommitHash())
			updateFields.Timestamp = dbx.Node_Timestamp(nodeInfo.GetVersion().Timestamp)
			updateFields.Release = dbx.Node_Release(nodeInfo.GetVersion().GetRelease())
		}
	}

	updatedDBNode, err := cache.db.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	return convertDBNode(ctx, updatedDBNode)
}

// UpdateUptime updates a single storagenode's uptime stats in the db
func (cache *overlaycache) UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool, lambda, weight, uptimeDQ float64) (stats *overlay.NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)

	tx, err := cache.db.Open(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	dbNode, err := tx.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
	if err != nil {
		return nil, Error.Wrap(errs.Combine(err, tx.Rollback()))
	}
	// do not update reputation if node is disqualified
	if dbNode.Disqualified != nil {
		return getNodeStats(dbNode), Error.Wrap(tx.Commit())
	}

	updateFields := dbx.Node_Update_Fields{}
	uptimeAlpha, uptimeBeta, totalUptimeCount := updateReputation(
		isUp,
		dbNode.UptimeReputationAlpha,
		dbNode.UptimeReputationBeta,
		lambda,
		weight,
		dbNode.TotalUptimeCount,
	)
	mon.FloatVal("uptime_reputation_alpha").Observe(uptimeAlpha)
	mon.FloatVal("uptime_reputation_beta").Observe(uptimeBeta)

	updateFields.UptimeReputationAlpha = dbx.Node_UptimeReputationAlpha(uptimeAlpha)
	updateFields.UptimeReputationBeta = dbx.Node_UptimeReputationBeta(uptimeBeta)
	updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)

	uptimeRep := uptimeAlpha / (uptimeAlpha + uptimeBeta)
	if uptimeRep <= uptimeDQ {
		updateFields.Disqualified = dbx.Node_Disqualified(time.Now().UTC())
	}

	lastContactSuccess := dbNode.LastContactSuccess
	lastContactFailure := dbNode.LastContactFailure
	mon.Meter("uptime_updates").Mark(1)
	if isUp {
		updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(dbNode.UptimeSuccessCount + 1)
		updateFields.LastContactSuccess = dbx.Node_LastContactSuccess(time.Now())

		mon.Meter("uptime_update_successes").Mark(1)
		// we have seen this node in the past 24 hours
		if time.Since(lastContactFailure) > time.Hour*24 {
			mon.Meter("uptime_seen_24h").Mark(1)
		}
		// we have seen this node in the past week
		if time.Since(lastContactFailure) > time.Hour*24*7 {
			mon.Meter("uptime_seen_week").Mark(1)
		}
	} else {
		updateFields.LastContactFailure = dbx.Node_LastContactFailure(time.Now())

		mon.Meter("uptime_update_failures").Mark(1)
		// it's been over 24 hours since we've seen this node
		if time.Since(lastContactSuccess) > time.Hour*24 {
			mon.Meter("uptime_not_seen_24h").Mark(1)
		}
		// it's been over a week since we've seen this node
		if time.Since(lastContactSuccess) > time.Hour*24*7 {
			mon.Meter("uptime_not_seen_week").Mark(1)
		}
	}

	dbNode, err = tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
	if err != nil {
		return nil, Error.Wrap(errs.Combine(err, tx.Rollback()))
	}
	// TODO: Allegedly tx.Get_Node_By_Id and tx.Update_Node_By_Id should never return a nil value for dbNode,
	// however we've seen from some crashes that it does. We need to track down the cause of these crashes
	// but for now we're adding a nil check to prevent a panic.
	if dbNode == nil {
		return nil, Error.Wrap(errs.Combine(errs.New("unable to get node by ID: %v", nodeID), tx.Rollback()))
	}

	return getNodeStats(dbNode), Error.Wrap(tx.Commit())
}
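
// convertDBNode converts a dbx.Node row into an overlay.NodeDossier.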
func convertDBNode(ctx context.Context, info *dbx.Node) (_ *overlay.NodeDossier, err error) {
	defer mon.Task()(&ctx)(&err)
	if info == nil {
		return nil, Error.New("missing info")
	}

	id, err := storj.NodeIDFromBytes(info.Id)
	if err != nil {
		return nil, err
	}
	ver := &version.SemVer{
		Major: info.Major,
		Minor: info.Minor,
		Patch: info.Patch,
	}

	node := &overlay.NodeDossier{
		Node: pb.Node{
			Id:     id,
			LastIp: info.LastNet,
			Address: &pb.NodeAddress{
				Address:   info.Address,
				Transport: pb.NodeTransport(info.Protocol),
			},
		},
		Type: pb.NodeType(info.Type),
		Operator: pb.NodeOperator{
			Email:  info.Email,
			Wallet: info.Wallet,
		},
		Capacity: pb.NodeCapacity{
			FreeBandwidth: info.FreeBandwidth,
			FreeDisk:      info.FreeDisk,
		},
		Reputation: *getNodeStats(info),
		Version: pb.NodeVersion{
			Version:    ver.String(),
			CommitHash: info.Hash,
			Timestamp:  info.Timestamp,
			Release:    info.Release,
		},
		Contained:    info.Contained,
		Disqualified: info.Disqualified,
	}

	return node, nil
}
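
// convertDBNodeToPBNode converts a limited dbx node row into a pb.Node.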
func convertDBNodeToPBNode(ctx context.Context, info *dbx.Id_LastNet_Address_Protocol_Row) (_ *pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	if info == nil {
		return nil, Error.New("missing info")
	}

	id, err := storj.NodeIDFromBytes(info.Id)
	if err != nil {
		return nil, err
	}
	return &pb.Node{
		Id:     id,
		LastIp: info.LastNet,
		Address: &pb.NodeAddress{
			Address:   info.Address,
			Transport: pb.NodeTransport(info.Protocol),
		},
	}, nil
}
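
// getNodeStats builds overlay.NodeStats from the reputation fields of a dbx.Node row.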
func getNodeStats(dbNode *dbx.Node) *overlay.NodeStats {
	nodeStats := &overlay.NodeStats{
		Latency90:             dbNode.Latency90,
		AuditCount:            dbNode.TotalAuditCount,
		UptimeCount:           dbNode.TotalUptimeCount,
		LastContactSuccess:    dbNode.LastContactSuccess,
		LastContactFailure:    dbNode.LastContactFailure,
		AuditReputationAlpha:  dbNode.AuditReputationAlpha,
		AuditReputationBeta:   dbNode.AuditReputationBeta,
		UptimeReputationAlpha: dbNode.UptimeReputationAlpha,
		UptimeReputationBeta:  dbNode.UptimeReputationBeta,
		Disqualified:          dbNode.Disqualified,
	}
	return nodeStats
}

// updateReputation uses the Beta distribution model to determine a node's reputation.
// lambda is the "forgetting factor" which determines how much past info is kept when determining current reputation score.
// w is the normalization weight that affects how severely new updates affect the current reputation distribution.
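//
// For example, with lambda = 1, w = 1, and alpha = beta = 1, a success moves
// (alpha, beta) to (2, 1) and a failure moves it to (1, 2).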
func updateReputation(isSuccess bool, alpha, beta, lambda, w float64, totalCount int64) (newAlpha, newBeta float64, updatedCount int64) {
	// v is a single feedback value that allows us to update both alpha and beta
	var v float64 = -1
	if isSuccess {
		v = 1
	}
	newAlpha = lambda*alpha + w*(1+v)/2
	newBeta = lambda*beta + w*(1-v)/2
	return newAlpha, newBeta, totalCount + 1
}