// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package tally

import (
	"context"
	"time"

	"github.com/gogo/protobuf/proto"
	"go.uber.org/zap"

	dbx "storj.io/storj/pkg/accounting/dbx"
	"storj.io/storj/pkg/kademlia"
	"storj.io/storj/pkg/node"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/pointerdb"
	"storj.io/storj/pkg/provider"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
)

// Tally is the service for accounting for data stored on each storage node
type Tally interface {
	Run(ctx context.Context) error
}
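// tally implements the Tally interface and holds the services and database
// handle the accounting loop depends on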
type tally struct {
	pointerdb *pointerdb.Server
	overlay   pb.OverlayServer
	kademlia  *kademlia.Kademlia
	limit     int
	logger    *zap.Logger
	ticker    *time.Ticker
	db        *dbx.DB
}
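// newTally wires the given dependencies into a tally and creates the ticker
// that paces each accounting run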
func newTally(logger *zap.Logger, db *dbx.DB, pointerdb *pointerdb.Server, overlay pb.OverlayServer, kademlia *kademlia.Kademlia, limit int, interval time.Duration) (*tally, error) {
	return &tally{
		pointerdb: pointerdb,
		overlay:   overlay,
		kademlia:  kademlia,
		limit:     limit,
		logger:    logger,
		ticker:    time.NewTicker(interval),
		db:        db,
	}, nil
}

// Run the tally loop
func (t *tally) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	for {
		err = t.identifyActiveNodes(ctx)
		if err != nil {
			zap.L().Error("Tally failed", zap.Error(err))
		}

		select {
		case <-t.ticker.C: // wait for the next interval to happen
		case <-ctx.Done(): // or the tally is canceled via context
			_ = t.db.Close()
			return ctx.Err()
		}
	}
}

// identifyActiveNodes iterates through pointerdb and identifies nodes that have storage on them
func (t *tally) identifyActiveNodes(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	rt, err := t.kademlia.GetRoutingTable(ctx)
	if err != nil {
		return Error.Wrap(err)
	}
	self := rt.Local()
	identity := &provider.FullIdentity{} // TODO: decide whether this identity needs to be populated
	client, err := node.NewNodeClient(identity, self, t.kademlia)
	if err != nil {
		return Error.Wrap(err)
	}

	err = t.pointerdb.Iterate(ctx, &pb.IterateRequest{Recurse: true},
		func(it storage.Iterator) error {
			var item storage.ListItem
			lim := t.limit
			if lim <= 0 || lim > storage.LookupLimit {
				lim = storage.LookupLimit
			}
			for ; lim > 0 && it.Next(&item); lim-- {
				pointer := &pb.Pointer{}
				err = proto.Unmarshal(item.Value, pointer)
				if err != nil {
					return Error.Wrap(err)
				}
				if pointer.Remote == nil {
					continue // inline segments store no remote pieces
				}
				pieces := pointer.Remote.RemotePieces
				var nodeIDs storj.NodeIDList
				for _, p := range pieces {
					nodeIDs = append(nodeIDs, p.NodeId)
				}
				online, err := t.onlineNodes(ctx, nodeIDs)
				if err != nil {
					return Error.Wrap(err)
				}
				go t.tallyAtRestStorage(ctx, pointer, online, client)
			}
			return nil
		},
	)
	return err
}
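// onlineNodes bulk-looks up the given node IDs in the overlay and returns the
// subset of nodes the overlay could resolve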
func (t *tally) onlineNodes(ctx context.Context, nodeIDs storj.NodeIDList) (online []*pb.Node, err error) {
	responses, err := t.overlay.BulkLookup(ctx, pb.NodeIDsToLookupRequests(nodeIDs))
	if err != nil {
		return []*pb.Node{}, err
	}
	nodes := pb.LookupResponsesToNodes(responses)
	for _, n := range nodes {
		if n != nil {
			online = append(online, n)
		}
	}
	return online, nil
}
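// tallyAtRestStorage calculates the at-rest piece size for the pointer and
// records it for each node that is reachable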
func (t *tally) tallyAtRestStorage(ctx context.Context, pointer *pb.Pointer, nodes []*pb.Node, client node.Client) {
	segmentSize := pointer.GetSegmentSize()
	minReq := pointer.Remote.Redundancy.GetMinReq()
	if minReq <= 0 {
zap.L().Error("minReq must be an int greater than 0")
		return
	}
	pieceSize := segmentSize / int64(minReq)
	for _, n := range nodes {
		nodeAvail := true
		var err error
		ok := t.needToContact(n.Id)
		if ok {
			nodeAvail, err = client.Ping(ctx, *n)
			if err != nil {
zap.L().Error("ping failed")
				continue
			}
		}
		if nodeAvail {
			err := t.updateGranularTable(n.Id, pieceSize)
			if err != nil {
zap.L().Error("update failed")
			}
		}
	}
}
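// needToContact reports whether the node should be pinged before recording its
// tally; it currently always returns true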
func (t *tally) needToContact(id storj.NodeID) bool {
	// TODO: check the db whether the node was updated within the last time period
	return true
}
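// updateGranularTable records the piece size stored by the node in the
// granular accounting table; not yet implemented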
func (t *tally) updateGranularTable(id storj.NodeID, pieceSize int64) error {
	// TODO
	return nil
}