// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
|
|
|
|
package satellitedb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"database/sql"
|
|
|
|
"strings"
|
|
|
|
|
|
|
|
"github.com/zeebo/errs"
|
|
|
|
monkit "gopkg.in/spacemonkeygo/monkit.v2"
|
|
|
|
|
2019-03-01 17:46:34 +00:00
|
|
|
"storj.io/storj/pkg/pb"
|
2018-12-14 20:17:30 +00:00
|
|
|
"storj.io/storj/pkg/statdb"
|
|
|
|
"storj.io/storj/pkg/storj"
|
2018-12-19 22:34:20 +00:00
|
|
|
"storj.io/storj/pkg/utils"
|
2018-12-14 20:17:30 +00:00
|
|
|
dbx "storj.io/storj/satellite/satellitedb/dbx"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// mon records monkit telemetry (timings, call counts) for this package.
	mon = monkit.Package()

	// errAuditSuccess classifies validation failures of audit success counters.
	errAuditSuccess = errs.Class("statdb audit success error")
	// errUptime classifies validation failures of uptime counters.
	errUptime = errs.Class("statdb uptime error")
)
|
|
|
|
|
|
|
|
// StatDB implements the statdb RPC service
type statDB struct {
	// db is the dbx-generated database handle shared with the rest of satellitedb.
	db *dbx.DB
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
func getNodeStats(nodeID storj.NodeID, dbNode *dbx.Node) *statdb.NodeStats {
|
|
|
|
nodeStats := &statdb.NodeStats{
|
|
|
|
NodeID: nodeID,
|
|
|
|
AuditSuccessRatio: dbNode.AuditSuccessRatio,
|
|
|
|
AuditSuccessCount: dbNode.AuditSuccessCount,
|
|
|
|
AuditCount: dbNode.TotalAuditCount,
|
|
|
|
UptimeRatio: dbNode.UptimeRatio,
|
|
|
|
UptimeSuccessCount: dbNode.UptimeSuccessCount,
|
|
|
|
UptimeCount: dbNode.TotalUptimeCount,
|
2019-03-01 17:46:34 +00:00
|
|
|
Operator: pb.NodeOperator{
|
|
|
|
Email: dbNode.Email,
|
|
|
|
Wallet: dbNode.Wallet,
|
|
|
|
},
|
2018-12-19 18:44:03 +00:00
|
|
|
}
|
|
|
|
return nodeStats
|
|
|
|
}
|
|
|
|
|
2018-12-14 20:17:30 +00:00
|
|
|
// Create a db entry for the provided storagenode
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) Create(ctx context.Context, nodeID storj.NodeID, startingStats *statdb.NodeStats) (stats *statdb.NodeStats, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
var (
|
|
|
|
totalAuditCount int64
|
|
|
|
auditSuccessCount int64
|
|
|
|
auditSuccessRatio float64
|
|
|
|
totalUptimeCount int64
|
|
|
|
uptimeSuccessCount int64
|
|
|
|
uptimeRatio float64
|
2019-03-01 17:46:34 +00:00
|
|
|
wallet string
|
|
|
|
email string
|
2018-12-14 20:17:30 +00:00
|
|
|
)
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
if startingStats != nil {
|
|
|
|
totalAuditCount = startingStats.AuditCount
|
|
|
|
auditSuccessCount = startingStats.AuditSuccessCount
|
2018-12-14 20:17:30 +00:00
|
|
|
auditSuccessRatio, err = checkRatioVars(auditSuccessCount, totalAuditCount)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errAuditSuccess.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
totalUptimeCount = startingStats.UptimeCount
|
|
|
|
uptimeSuccessCount = startingStats.UptimeSuccessCount
|
2018-12-14 20:17:30 +00:00
|
|
|
uptimeRatio, err = checkRatioVars(uptimeSuccessCount, totalUptimeCount)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errUptime.Wrap(err)
|
|
|
|
}
|
2019-03-01 17:46:34 +00:00
|
|
|
wallet = startingStats.Operator.Wallet
|
|
|
|
email = startingStats.Operator.Email
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dbNode, err := s.db.Create_Node(
|
|
|
|
ctx,
|
2018-12-19 18:44:03 +00:00
|
|
|
dbx.Node_Id(nodeID.Bytes()),
|
2018-12-14 20:17:30 +00:00
|
|
|
dbx.Node_AuditSuccessCount(auditSuccessCount),
|
|
|
|
dbx.Node_TotalAuditCount(totalAuditCount),
|
|
|
|
dbx.Node_AuditSuccessRatio(auditSuccessRatio),
|
|
|
|
dbx.Node_UptimeSuccessCount(uptimeSuccessCount),
|
|
|
|
dbx.Node_TotalUptimeCount(totalUptimeCount),
|
|
|
|
dbx.Node_UptimeRatio(uptimeRatio),
|
2019-03-01 17:46:34 +00:00
|
|
|
dbx.Node_Wallet(wallet),
|
|
|
|
dbx.Node_Email(email),
|
2018-12-14 20:17:30 +00:00
|
|
|
)
|
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(err)
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStats := getNodeStats(nodeID, dbNode)
|
|
|
|
return nodeStats, nil
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get a storagenode's stats from the db
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) Get(ctx context.Context, nodeID storj.NodeID) (stats *statdb.NodeStats, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
dbNode, err := s.db.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(err)
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStats := getNodeStats(nodeID, dbNode)
|
|
|
|
return nodeStats, nil
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// FindInvalidNodes finds a subset of storagenodes that fail to meet minimum reputation requirements
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) FindInvalidNodes(ctx context.Context, nodeIDs storj.NodeIDList, maxStats *statdb.NodeStats) (invalidIDs storj.NodeIDList, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
var invalidIds storj.NodeIDList
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
maxAuditSuccess := maxStats.AuditSuccessRatio
|
|
|
|
maxUptime := maxStats.UptimeRatio
|
2018-12-14 20:17:30 +00:00
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
rows, err := s.findInvalidNodesQuery(nodeIDs, maxAuditSuccess, maxUptime)
|
2018-12-14 20:17:30 +00:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer func() {
|
2018-12-19 22:34:20 +00:00
|
|
|
err = utils.CombineErrors(err, rows.Close())
|
2018-12-14 20:17:30 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
for rows.Next() {
|
|
|
|
node := &dbx.Node{}
|
|
|
|
err = rows.Scan(&node.Id, &node.TotalAuditCount, &node.TotalUptimeCount, &node.AuditSuccessRatio, &node.UptimeRatio)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
id, err := storj.NodeIDFromBytes(node.Id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
invalidIds = append(invalidIds, id)
|
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
return invalidIds, nil
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *statDB) findInvalidNodesQuery(nodeIds storj.NodeIDList, auditSuccess, uptime float64) (*sql.Rows, error) {
|
|
|
|
args := make([]interface{}, len(nodeIds))
|
|
|
|
for i, id := range nodeIds {
|
|
|
|
args[i] = id.Bytes()
|
|
|
|
}
|
|
|
|
args = append(args, auditSuccess, uptime)
|
|
|
|
|
|
|
|
rows, err := s.db.Query(s.db.Rebind(`SELECT nodes.id, nodes.total_audit_count,
|
|
|
|
nodes.total_uptime_count, nodes.audit_success_ratio,
|
|
|
|
nodes.uptime_ratio
|
|
|
|
FROM nodes
|
|
|
|
WHERE nodes.id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`)
|
|
|
|
AND nodes.total_audit_count > 0
|
|
|
|
AND nodes.total_uptime_count > 0
|
|
|
|
AND (
|
|
|
|
nodes.audit_success_ratio < ?
|
|
|
|
OR nodes.uptime_ratio < ?
|
|
|
|
)`), args...)
|
|
|
|
|
|
|
|
return rows, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update a single storagenode's stats in the db
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) Update(ctx context.Context, updateReq *statdb.UpdateRequest) (stats *statdb.NodeStats, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeID := updateReq.NodeID
|
2018-12-14 20:17:30 +00:00
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
tx, err := s.db.Open(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
dbNode, err := tx.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(utils.CombineErrors(err, tx.Rollback()))
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
auditSuccessCount := dbNode.AuditSuccessCount
|
|
|
|
totalAuditCount := dbNode.TotalAuditCount
|
|
|
|
var auditSuccessRatio float64
|
|
|
|
uptimeSuccessCount := dbNode.UptimeSuccessCount
|
|
|
|
totalUptimeCount := dbNode.TotalUptimeCount
|
|
|
|
var uptimeRatio float64
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
auditSuccessCount, totalAuditCount, auditSuccessRatio = updateRatioVars(
|
|
|
|
updateReq.AuditSuccess,
|
|
|
|
auditSuccessCount,
|
|
|
|
totalAuditCount,
|
|
|
|
)
|
2018-12-14 20:17:30 +00:00
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
uptimeSuccessCount, totalUptimeCount, uptimeRatio = updateRatioVars(
|
|
|
|
updateReq.IsUp,
|
|
|
|
uptimeSuccessCount,
|
|
|
|
totalUptimeCount,
|
|
|
|
)
|
2018-12-14 20:17:30 +00:00
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
updateFields := dbx.Node_Update_Fields{
|
|
|
|
AuditSuccessCount: dbx.Node_AuditSuccessCount(auditSuccessCount),
|
|
|
|
TotalAuditCount: dbx.Node_TotalAuditCount(totalAuditCount),
|
|
|
|
AuditSuccessRatio: dbx.Node_AuditSuccessRatio(auditSuccessRatio),
|
|
|
|
UptimeSuccessCount: dbx.Node_UptimeSuccessCount(uptimeSuccessCount),
|
|
|
|
TotalUptimeCount: dbx.Node_TotalUptimeCount(totalUptimeCount),
|
|
|
|
UptimeRatio: dbx.Node_UptimeRatio(uptimeRatio),
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(uptimeSuccessCount)
|
|
|
|
updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)
|
|
|
|
updateFields.UptimeRatio = dbx.Node_UptimeRatio(uptimeRatio)
|
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
dbNode, err = tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(utils.CombineErrors(err, tx.Rollback()))
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStats := getNodeStats(nodeID, dbNode)
|
2018-12-19 22:34:20 +00:00
|
|
|
return nodeStats, Error.Wrap(tx.Commit())
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2019-03-01 17:46:34 +00:00
|
|
|
// UpdateStats takes a NodeStats struct and updates the appropriate node with that information
|
|
|
|
func (s *statDB) UpdateOperator(ctx context.Context, nodeID storj.NodeID, operator pb.NodeOperator) (stats *statdb.NodeStats, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
tx, err := s.db.Open(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
updateFields := dbx.Node_Update_Fields{
|
|
|
|
Wallet: dbx.Node_Wallet(operator.GetWallet()),
|
|
|
|
Email: dbx.Node_Email(operator.GetEmail()),
|
|
|
|
}
|
|
|
|
|
|
|
|
updatedDBNode, err := tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(tx.Rollback())
|
|
|
|
}
|
|
|
|
|
|
|
|
updated := getNodeStats(nodeID, updatedDBNode)
|
|
|
|
|
|
|
|
return updated, utils.CombineErrors(err, tx.Commit())
|
|
|
|
}
|
|
|
|
|
2018-12-14 20:17:30 +00:00
|
|
|
// UpdateUptime updates a single storagenode's uptime stats in the db
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool) (stats *statdb.NodeStats, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
tx, err := s.db.Open(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
dbNode, err := tx.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(utils.CombineErrors(err, tx.Rollback()))
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
uptimeSuccessCount := dbNode.UptimeSuccessCount
|
|
|
|
totalUptimeCount := dbNode.TotalUptimeCount
|
|
|
|
var uptimeRatio float64
|
|
|
|
|
|
|
|
updateFields := dbx.Node_Update_Fields{}
|
|
|
|
|
|
|
|
uptimeSuccessCount, totalUptimeCount, uptimeRatio = updateRatioVars(
|
2018-12-19 18:44:03 +00:00
|
|
|
isUp,
|
2018-12-14 20:17:30 +00:00
|
|
|
uptimeSuccessCount,
|
|
|
|
totalUptimeCount,
|
|
|
|
)
|
|
|
|
|
|
|
|
updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(uptimeSuccessCount)
|
|
|
|
updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)
|
|
|
|
updateFields.UptimeRatio = dbx.Node_UptimeRatio(uptimeRatio)
|
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
dbNode, err = tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(utils.CombineErrors(err, tx.Rollback()))
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStats := getNodeStats(nodeID, dbNode)
|
2018-12-19 22:34:20 +00:00
|
|
|
return nodeStats, Error.Wrap(tx.Commit())
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateAuditSuccess updates a single storagenode's uptime stats in the db
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) UpdateAuditSuccess(ctx context.Context, nodeID storj.NodeID, auditSuccess bool) (stats *statdb.NodeStats, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
tx, err := s.db.Open(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
dbNode, err := tx.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(utils.CombineErrors(err, tx.Rollback()))
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
auditSuccessCount := dbNode.AuditSuccessCount
|
|
|
|
totalAuditCount := dbNode.TotalAuditCount
|
|
|
|
var auditRatio float64
|
|
|
|
|
|
|
|
updateFields := dbx.Node_Update_Fields{}
|
|
|
|
|
|
|
|
auditSuccessCount, totalAuditCount, auditRatio = updateRatioVars(
|
2018-12-19 18:44:03 +00:00
|
|
|
auditSuccess,
|
2018-12-14 20:17:30 +00:00
|
|
|
auditSuccessCount,
|
|
|
|
totalAuditCount,
|
|
|
|
)
|
|
|
|
|
|
|
|
updateFields.AuditSuccessCount = dbx.Node_AuditSuccessCount(auditSuccessCount)
|
|
|
|
updateFields.TotalAuditCount = dbx.Node_TotalAuditCount(totalAuditCount)
|
|
|
|
updateFields.AuditSuccessRatio = dbx.Node_AuditSuccessRatio(auditRatio)
|
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
dbNode, err = tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
return nil, Error.Wrap(utils.CombineErrors(err, tx.Rollback()))
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStats := getNodeStats(nodeID, dbNode)
|
2018-12-19 22:34:20 +00:00
|
|
|
return nodeStats, Error.Wrap(tx.Commit())
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2019-01-02 10:31:49 +00:00
|
|
|
// UpdateBatch for updating multiple storage nodes' stats in the db
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) UpdateBatch(ctx context.Context, updateReqList []*statdb.UpdateRequest) (
|
|
|
|
statsList []*statdb.NodeStats, failedUpdateReqs []*statdb.UpdateRequest, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
var nodeStatsList []*statdb.NodeStats
|
2018-12-19 22:34:20 +00:00
|
|
|
var allErrors []error
|
2018-12-19 18:44:03 +00:00
|
|
|
failedUpdateReqs = []*statdb.UpdateRequest{}
|
|
|
|
for _, updateReq := range updateReqList {
|
2018-12-14 20:17:30 +00:00
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStats, err := s.Update(ctx, updateReq)
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
2018-12-19 22:34:20 +00:00
|
|
|
allErrors = append(allErrors, err)
|
2018-12-19 18:44:03 +00:00
|
|
|
failedUpdateReqs = append(failedUpdateReqs, updateReq)
|
2018-12-14 20:17:30 +00:00
|
|
|
} else {
|
2018-12-19 18:44:03 +00:00
|
|
|
nodeStatsList = append(nodeStatsList, nodeStats)
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-19 22:34:20 +00:00
|
|
|
if len(allErrors) > 0 {
|
|
|
|
return nodeStatsList, failedUpdateReqs, Error.Wrap(utils.CombineErrors(allErrors...))
|
|
|
|
}
|
|
|
|
return nodeStatsList, nil, nil
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CreateEntryIfNotExists creates a statdb node entry and saves to statdb if it didn't already exist
|
2018-12-19 18:44:03 +00:00
|
|
|
func (s *statDB) CreateEntryIfNotExists(ctx context.Context, nodeID storj.NodeID) (stats *statdb.NodeStats, err error) {
|
2018-12-14 20:17:30 +00:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-19 18:44:03 +00:00
|
|
|
getStats, err := s.Get(ctx, nodeID)
|
2018-12-14 20:17:30 +00:00
|
|
|
// TODO: figure out better way to confirm error is type dbx.ErrorCode_NoRows
|
|
|
|
if err != nil && strings.Contains(err.Error(), "no rows in result set") {
|
2018-12-19 18:44:03 +00:00
|
|
|
createStats, err := s.Create(ctx, nodeID, nil)
|
2018-12-14 20:17:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-12-19 18:44:03 +00:00
|
|
|
return createStats, nil
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-12-19 18:44:03 +00:00
|
|
|
return getStats, nil
|
2018-12-14 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// updateRatioVars records one more observation: it increments totalCount,
// increments successCount when newStatus is true, and returns both counters
// together with the recomputed success ratio.
func updateRatioVars(newStatus bool, successCount, totalCount int64) (int64, int64, float64) {
	totalCount++
	if newStatus {
		successCount++
	}
	return successCount, totalCount, float64(successCount) / float64(totalCount)
}
|
|
|
|
|
|
|
|
func checkRatioVars(successCount, totalCount int64) (ratio float64, err error) {
|
|
|
|
if successCount < 0 {
|
|
|
|
return 0, errs.New("success count less than 0")
|
|
|
|
}
|
|
|
|
if totalCount < 0 {
|
|
|
|
return 0, errs.New("total count less than 0")
|
|
|
|
}
|
|
|
|
if successCount > totalCount {
|
|
|
|
return 0, errs.New("success count greater than total count")
|
|
|
|
}
|
|
|
|
if totalCount == 0 {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
ratio = float64(successCount) / float64(totalCount)
|
|
|
|
return ratio, nil
|
|
|
|
}
|