// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay

import (
	"context"
	"errors"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
)

const (
	// OverlayBucket is the string representing the bucket used for a bolt-backed overlay DHT cache
	OverlayBucket = "overlay"
)

// ErrEmptyNode is returned when the node ID is empty
var ErrEmptyNode = errs.New("empty node ID")

// ErrNodeNotFound is returned if a node does not exist in the database
var ErrNodeNotFound = errs.New("Node not found")

// ErrBucketNotFound is returned if a bucket cannot be found in the routing table
var ErrBucketNotFound = errs.New("Bucket not found")

// ErrNotEnoughNodes is returned when node selection fails with the given parameters
var ErrNotEnoughNodes = errs.Class("not enough nodes")

// OverlayError creates a class of errors for stack traces
var OverlayError = errs.Class("Overlay Error")

// DB implements the database for overlay.Cache
type DB interface {
	// SelectStorageNodes looks up nodes based on criteria
	SelectStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*pb.Node, error)
	// SelectNewStorageNodes looks up nodes based on new node criteria
	SelectNewStorageNodes(ctx context.Context, count int, criteria *NewNodeCriteria) ([]*pb.Node, error)

	// Get looks up the node by nodeID
	Get(ctx context.Context, nodeID storj.NodeID) (*pb.Node, error)
	// GetAll looks up nodes based on the ids from the overlay cache
	GetAll(ctx context.Context, nodeIDs storj.NodeIDList) ([]*pb.Node, error)
	// List lists nodes starting from cursor
	List(ctx context.Context, cursor storj.NodeID, limit int) ([]*pb.Node, error)
	// Paginate will page through the database nodes
	Paginate(ctx context.Context, offset int64, limit int) ([]*pb.Node, bool, error)
	// Update updates node information
	Update(ctx context.Context, value *pb.Node) error
	// Delete deletes node based on id
	Delete(ctx context.Context, id storj.NodeID) error

	// Create adds a new stats entry for node.
	Create(ctx context.Context, nodeID storj.NodeID, initial *NodeStats) (stats *NodeStats, err error)
	// GetStats returns node stats.
	GetStats(ctx context.Context, nodeID storj.NodeID) (stats *NodeStats, err error)
	// FindInvalidNodes finds a subset of storagenodes that have stats below provided reputation requirements.
	FindInvalidNodes(ctx context.Context, nodeIDs storj.NodeIDList, maxStats *NodeStats) (invalid storj.NodeIDList, err error)
	// UpdateStats updates all parts of a single storagenode's stats.
	UpdateStats(ctx context.Context, request *UpdateRequest) (stats *NodeStats, err error)
	// UpdateOperator updates the email and wallet for a given node ID for satellite payments.
	UpdateOperator(ctx context.Context, node storj.NodeID, updatedOperator pb.NodeOperator) (stats *NodeStats, err error)
	// UpdateUptime updates a single storagenode's uptime stats.
	UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool) (stats *NodeStats, err error)
	// UpdateAuditSuccess updates a single storagenode's audit stats.
	UpdateAuditSuccess(ctx context.Context, nodeID storj.NodeID, auditSuccess bool) (stats *NodeStats, err error)
	// UpdateBatch updates multiple storage nodes' stats.
	UpdateBatch(ctx context.Context, requests []*UpdateRequest) (statslist []*NodeStats, failed []*UpdateRequest, err error)
	// CreateEntryIfNotExists creates a node stats entry if it didn't already exist.
	CreateEntryIfNotExists(ctx context.Context, nodeID storj.NodeID) (stats *NodeStats, err error)
}

// FindStorageNodesRequest defines the parameters for requesting storage nodes.
type FindStorageNodesRequest struct {
	MinimumRequiredNodes int
	RequestedCount       int

	FreeBandwidth int64
	FreeDisk      int64

	ExcludedNodes []storj.NodeID
}

// NodeCriteria are the requirements for selecting nodes
type NodeCriteria struct {
	FreeBandwidth int64
	FreeDisk      int64

	AuditCount         int64
	AuditSuccessRatio  float64
	UptimeCount        int64
	UptimeSuccessRatio float64

	Excluded []storj.NodeID
}

// NewNodeCriteria are the requirements for selecting new nodes
type NewNodeCriteria struct {
	FreeBandwidth int64
	FreeDisk      int64

	AuditThreshold int64

	Excluded []storj.NodeID
}

// UpdateRequest is used to update a node status.
type UpdateRequest struct {
	NodeID       storj.NodeID
	AuditSuccess bool
	IsUp         bool
}
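
// Illustrative caller-side sketch (hypothetical cache, ctx, and nodeID in the
// caller's scope) showing how an UpdateRequest feeds Cache.UpdateStats:
//
//	stats, err := cache.UpdateStats(ctx, &overlay.UpdateRequest{
//		NodeID:       nodeID,
//		AuditSuccess: true,
//		IsUp:         true,
//	})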

// NodeStats contains statistics about a node.
type NodeStats struct {
	NodeID             storj.NodeID
	AuditSuccessRatio  float64
	AuditSuccessCount  int64
	AuditCount         int64
	UptimeRatio        float64
	UptimeSuccessCount int64
	UptimeCount        int64
	Operator           pb.NodeOperator
}

// Cache is used to store and handle node information
type Cache struct {
	log         *zap.Logger
	db          DB
	preferences NodeSelectionConfig
}

// NewCache returns a new Cache
func NewCache(log *zap.Logger, db DB, preferences NodeSelectionConfig) *Cache {
	return &Cache{
		log:         log,
		db:          db,
		preferences: preferences,
	}
}
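
// Illustrative construction (hypothetical logger, DB implementation, and
// node selection config):
//
//	cache := overlay.NewCache(logger.Named("overlay"), overlayDB, selectionConfig)
//	defer func() { _ = cache.Close() }()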

// Close closes resources
func (cache *Cache) Close() error { return nil }

// Inspect lists a limited number of items in the cache
func (cache *Cache) Inspect(ctx context.Context) (storage.Keys, error) {
	// TODO: implement inspection tools
	return nil, errors.New("not implemented")
}

// List returns a list of nodes from the cache DB
func (cache *Cache) List(ctx context.Context, cursor storj.NodeID, limit int) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	return cache.db.List(ctx, cursor, limit)
}

// Paginate returns a list of `limit` nodes starting from the `offset` position.
func (cache *Cache) Paginate(ctx context.Context, offset int64, limit int) (_ []*pb.Node, _ bool, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.Paginate(ctx, offset, limit)
}
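
// Illustrative call (hypothetical page size; the boolean return is assumed to
// indicate whether more nodes remain, per the DB implementation):
//
//	nodes, more, err := cache.Paginate(ctx, 0, 50)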

// Get looks up the provided nodeID from the overlay cache
func (cache *Cache) Get(ctx context.Context, nodeID storj.NodeID) (_ *pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)
	if nodeID.IsZero() {
		return nil, ErrEmptyNode
	}
	return cache.db.Get(ctx, nodeID)
}

// OfflineNodes returns indices of the nodes that are offline
func (cache *Cache) OfflineNodes(ctx context.Context, nodes []storj.NodeID) (offline []int, err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO: optimize
	results, err := cache.GetAll(ctx, nodes)
	if err != nil {
		return nil, err
	}

	for i, r := range results {
		if r == nil {
			offline = append(offline, i)
		}
	}

	return offline, nil
}
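
// Illustrative usage (hypothetical IDs); the returned indices refer to
// positions in the input slice, not node IDs:
//
//	offlineIdx, err := cache.OfflineNodes(ctx, []storj.NodeID{id1, id2, id3})
//	// offlineIdx == []int{1} would mean the node with id2 is considered offline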

// FindStorageNodes searches the overlay network for nodes that meet the provided requirements
func (cache *Cache) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) ([]*pb.Node, error) {
	return cache.FindStorageNodesWithPreferences(ctx, req, &cache.preferences)
}

// FindStorageNodesWithPreferences searches the overlay network for nodes that meet the provided criteria
func (cache *Cache) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO: verify logic

	// TODO: add sanity limits to requested node count
	// TODO: add sanity limits to excluded nodes

	reputableNodeCount := req.MinimumRequiredNodes
	if reputableNodeCount <= 0 {
		reputableNodeCount = req.RequestedCount
	}

	auditCount := preferences.AuditCount
	if auditCount < preferences.NewNodeAuditThreshold {
		auditCount = preferences.NewNodeAuditThreshold
	}

	reputableNodes, err := cache.db.SelectStorageNodes(ctx, reputableNodeCount, &NodeCriteria{
		FreeBandwidth: req.FreeBandwidth,
		FreeDisk:      req.FreeDisk,

		AuditCount:         auditCount,
		AuditSuccessRatio:  preferences.AuditSuccessRatio,
		UptimeCount:        preferences.UptimeCount,
		UptimeSuccessRatio: preferences.UptimeRatio,

		Excluded: req.ExcludedNodes,
	})
	if err != nil {
		return nil, err
	}

	newNodeCount := int64(float64(reputableNodeCount) * preferences.NewNodePercentage)
	newNodes, err := cache.db.SelectNewStorageNodes(ctx, int(newNodeCount), &NewNodeCriteria{
		FreeBandwidth: req.FreeBandwidth,
		FreeDisk:      req.FreeDisk,

		AuditThreshold: preferences.NewNodeAuditThreshold,

		Excluded: req.ExcludedNodes,
	})
	if err != nil {
		return nil, err
	}

	nodes := []*pb.Node{}
	nodes = append(nodes, newNodes...)
	nodes = append(nodes, reputableNodes...)

	if len(reputableNodes) < reputableNodeCount {
		return nodes, ErrNotEnoughNodes.New("requested %d found %d", reputableNodeCount, len(reputableNodes))
	}

	return nodes, nil
}
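
// Illustrative request (hypothetical counts and sizes, in bytes):
//
//	nodes, err := cache.FindStorageNodes(ctx, overlay.FindStorageNodesRequest{
//		MinimumRequiredNodes: 20,
//		RequestedCount:       40,
//		FreeBandwidth:        1 << 30,
//		FreeDisk:             1 << 30,
//	})
//	if overlay.ErrNotEnoughNodes.Has(err) {
//		// fewer reputable nodes than requested were found;
//		// nodes still holds the partial selection
//	}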

// GetAll looks up the provided ids from the overlay cache
func (cache *Cache) GetAll(ctx context.Context, ids storj.NodeIDList) (_ []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(ids) == 0 {
		return nil, OverlayError.New("no ids provided")
	}

	return cache.db.GetAll(ctx, ids)
}

// Put adds a node id and proto definition into the overlay cache and stat db
func (cache *Cache) Put(ctx context.Context, nodeID storj.NodeID, value pb.Node) (err error) {
	defer mon.Task()(&ctx)(&err)

	// If we get a Node without an ID (i.e. bootstrap node)
	// we don't want to add it to the routing table
	if nodeID.IsZero() {
		return nil
	}
	if nodeID != value.Id {
		return errors.New("invalid request")
	}

	// get existing node rep, or create a new overlay node with 0 rep
	stats, err := cache.db.CreateEntryIfNotExists(ctx, nodeID)
	if err != nil {
		return err
	}

	value.Reputation = &pb.NodeStats{
		AuditSuccessRatio:  stats.AuditSuccessRatio,
		AuditSuccessCount:  stats.AuditSuccessCount,
		AuditCount:         stats.AuditCount,
		UptimeRatio:        stats.UptimeRatio,
		UptimeSuccessCount: stats.UptimeSuccessCount,
		UptimeCount:        stats.UptimeCount,
	}

	return cache.db.Update(ctx, &value)
}
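
// Illustrative call (hypothetical *pb.Node); the nodeID argument must match
// value.Id or the request is rejected:
//
//	err := cache.Put(ctx, node.Id, *node)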

// Delete will remove the node from the cache. Used when a node hard disconnects or fails
// to pass a PING multiple times.
func (cache *Cache) Delete(ctx context.Context, id storj.NodeID) (err error) {
	defer mon.Task()(&ctx)(&err)
	if id.IsZero() {
		return ErrEmptyNode
	}

	return cache.db.Delete(ctx, id)
}

// Create adds a new stats entry for node.
func (cache *Cache) Create(ctx context.Context, nodeID storj.NodeID, initial *NodeStats) (stats *NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.Create(ctx, nodeID, initial)
}

// GetStats returns node stats.
func (cache *Cache) GetStats(ctx context.Context, nodeID storj.NodeID) (stats *NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.GetStats(ctx, nodeID)
}

// FindInvalidNodes finds a subset of storagenodes that have stats below provided reputation requirements.
func (cache *Cache) FindInvalidNodes(ctx context.Context, nodeIDs storj.NodeIDList, maxStats *NodeStats) (invalid storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.FindInvalidNodes(ctx, nodeIDs, maxStats)
}

// UpdateStats updates all parts of a single storagenode's stats.
func (cache *Cache) UpdateStats(ctx context.Context, request *UpdateRequest) (stats *NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.UpdateStats(ctx, request)
}

// UpdateOperator updates the email and wallet for a given node ID for satellite payments.
func (cache *Cache) UpdateOperator(ctx context.Context, node storj.NodeID, updatedOperator pb.NodeOperator) (stats *NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.UpdateOperator(ctx, node, updatedOperator)
}

// UpdateUptime updates a single storagenode's uptime stats.
func (cache *Cache) UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool) (stats *NodeStats, err error) {
	defer mon.Task()(&ctx)(&err)
	return cache.db.UpdateUptime(ctx, nodeID, isUp)
}

// ConnFailure implements the Transport Observer `ConnFailure` function
func (cache *Cache) ConnFailure(ctx context.Context, node *pb.Node, failureError error) {
	var err error
	defer mon.Task()(&ctx)(&err)

	// TODO: Kademlia paper specifies 5 unsuccessful PINGs before removing the node
	// from our routing table, but this is the cache so maybe we want to treat
	// it differently.
	_, err = cache.db.UpdateUptime(ctx, node.Id, false)
	if err != nil {
		zap.L().Debug("error updating uptime for node", zap.Error(err))
	}
}

// ConnSuccess implements the Transport Observer `ConnSuccess` function
func (cache *Cache) ConnSuccess(ctx context.Context, node *pb.Node) {
	var err error
	defer mon.Task()(&ctx)(&err)

	err = cache.Put(ctx, node.Id, *node)
	if err != nil {
		zap.L().Debug("error putting node into cache", zap.Error(err))
	}
	_, err = cache.db.UpdateUptime(ctx, node.Id, true)
	if err != nil {
		zap.L().Debug("error updating uptime for node", zap.Error(err))
	}
}