Statdb master db v3 848 (#830)

* initial changes to migrate statdb to masterdb framework

* statdb refactor compiles

* added TestCreateDoesNotExist testcase

* Initial port of statdb to masterdb framework working

* refactored statdb proto def to pkg/statdb

* removed statdb/proto folder

* moved pb.Node to storj.NodeID

* CreateEntryIfNotExistsRequest moved pb.Node to storj.NodeID

* moved the fields from pb.Node to statdb.UpdateRequest

* ported TestUpdateExists, TestUpdateUptimeExists, TestUpdateAuditSuccessExists, TestUpdateBatchExists
This commit is contained in:
aligeti 2018-12-14 15:17:30 -05:00 committed by GitHub
parent 9bdee7b106
commit 5e1b02ca8b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
47 changed files with 1857 additions and 3786 deletions

View File

@ -30,7 +30,6 @@ import (
"storj.io/storj/pkg/process"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/satellite/satelliteweb"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/utils"
"storj.io/storj/satellite/satellitedb"
)
@ -49,7 +48,6 @@ type Satellite struct {
Checker checker.Config
Repairer repairer.Config
Audit audit.Config
StatDB statdb.Config
BwAgreement bwagreement.Config
Web satelliteweb.Config
Database string `help:"satellite database connection string" default:"sqlite3://$CONFDIR/master.db"`
@ -131,7 +129,6 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
errch <- runCfg.Satellite.Identity.Run(ctx,
grpcauth.NewAPIKeyInterceptor(),
runCfg.Satellite.Kademlia,
runCfg.Satellite.StatDB,
runCfg.Satellite.Audit,
runCfg.Satellite.Overlay,
runCfg.Satellite.Discovery,

View File

@ -4,6 +4,8 @@
package main
import (
"context"
"go.uber.org/zap"
"storj.io/storj/pkg/overlay"
@ -20,14 +22,13 @@ type cacheConfig struct {
DatabaseURL string `help:"the database connection string to use"`
}
func (c cacheConfig) open() (*overlay.Cache, error) {
func (c cacheConfig) open(ctx context.Context) (*overlay.Cache, error) {
driver, source, err := utils.SplitDBURL(c.DatabaseURL)
if err != nil {
return nil, Error.Wrap(err)
}
var db storage.KeyValueStore
var sdb *statdb.StatDB
switch driver {
case "bolt":
@ -48,10 +49,12 @@ func (c cacheConfig) open() (*overlay.Cache, error) {
// add logger
db = storelogger.New(zap.L().Named("oc"), db)
sdb, err = statdb.NewStatDB("postgres", source, zap.L()) //todo: unhardcode this
if err != nil {
return nil, Error.New("statdb error: %s", err)
sdb, ok := ctx.Value("masterdb").(interface {
StatDB() statdb.DB
})
if !ok {
return nil, Error.New("unable to get master db instance")
}
return overlay.NewOverlayCache(db, nil, sdb), nil
return overlay.NewOverlayCache(db, nil, sdb.StatDB()), nil
}

View File

@ -48,7 +48,8 @@ func init() {
}
func cmdList(cmd *cobra.Command, args []string) (err error) {
c, err := cacheCfg.open()
ctx := process.Ctx(cmd)
c, err := cacheCfg.open(ctx)
if err != nil {
return err
}
@ -78,6 +79,7 @@ func cmdList(cmd *cobra.Command, args []string) (err error) {
}
func cmdAdd(cmd *cobra.Command, args []string) (err error) {
ctx := process.Ctx(cmd)
j, err := ioutil.ReadFile(cacheCfg.NodesPath)
if err != nil {
return errs.Wrap(err)
@ -88,7 +90,7 @@ func cmdAdd(cmd *cobra.Command, args []string) (err error) {
return errs.Wrap(err)
}
c, err := cacheCfg.open()
c, err := cacheCfg.open(ctx)
if err != nil {
return err
}

View File

@ -30,7 +30,6 @@ import (
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/process"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/satellitedb"
"storj.io/storj/storage/redis"
@ -67,7 +66,6 @@ var (
Kademlia kademlia.Config
PointerDB pointerdb.Config
Overlay overlay.Config
StatDB statdb.Config
Checker checker.Config
Repairer repairer.Config
Audit audit.Config
@ -125,7 +123,6 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
grpcauth.NewAPIKeyInterceptor(),
runCfg.Kademlia,
runCfg.PointerDB,
runCfg.StatDB,
runCfg.Overlay,
runCfg.Checker,
runCfg.Repairer,

View File

@ -5,9 +5,7 @@ package testplanet
import (
"context"
"fmt"
"io"
"math/rand"
"net"
"go.uber.org/zap"
@ -25,6 +23,7 @@ import (
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
"storj.io/storj/pkg/utils"
"storj.io/storj/satellite/satellitedb"
"storj.io/storj/storage/teststore"
)
@ -38,8 +37,9 @@ type Node struct {
Provider *provider.Provider
Kademlia *kademlia.Kademlia
Discovery *discovery.Discovery
StatDB *statdb.StatDB
StatDB statdb.DB
Overlay *overlay.Cache
Database *satellitedb.DB
Dependencies []io.Closer
}
@ -155,6 +155,17 @@ func (node *Node) DialOverlay(destination *Node) (overlay.Client, error) {
// initOverlay creates overlay for a given planet
func (node *Node) initOverlay(planet *Planet) error {
var err error
node.Database, err = satellitedb.NewInMemory()
if err != nil {
return err
}
err = node.Database.CreateTables()
if err != nil {
return err
}
routing, err := kademlia.NewRoutingTable(node.Info, teststore.New(), teststore.New())
if err != nil {
return err
@ -166,12 +177,7 @@ func (node *Node) initOverlay(planet *Planet) error {
}
node.Kademlia = kad
dbPath := fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", rand.Int63())
sdb, err := statdb.NewStatDB("sqlite3", dbPath, zap.NewNop())
if err != nil {
return err
}
node.StatDB = sdb
node.StatDB = node.Database.StatDB()
node.Overlay = overlay.NewOverlayCache(teststore.New(), node.Kademlia, node.StatDB)
node.Discovery = discovery.NewDiscovery(node.Overlay, node.Kademlia, node.StatDB)

View File

@ -6,34 +6,38 @@ package audit
import (
"context"
"storj.io/storj/pkg/pb"
"github.com/zeebo/errs"
"storj.io/storj/pkg/statdb"
statsproto "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/storj"
)
type reporter interface {
RecordAudits(ctx context.Context, failedNodes []*pb.Node) (err error)
RecordAudits(ctx context.Context, failedNodes []*statdb.UpdateRequest) (err error)
}
// Reporter records audit reports in statdb and implements the reporter interface
type Reporter struct {
statdb *statdb.StatDB
statdb statdb.DB
maxRetries int
}
// NewReporter instantiates a reporter
func NewReporter(ctx context.Context, statDBPort string, maxRetries int, apiKey string) (reporter *Reporter, err error) {
sdb := statdb.LoadFromContext(ctx)
return &Reporter{statdb: sdb, maxRetries: maxRetries}, nil
sdb, ok := ctx.Value("masterdb").(interface {
StatDB() statdb.DB
})
if !ok {
return nil, errs.New("unable to get master db instance")
}
return &Reporter{statdb: sdb.StatDB(), maxRetries: maxRetries}, nil
}
// RecordAudits saves failed audit details to statdb
func (reporter *Reporter) RecordAudits(ctx context.Context, nodes []*pb.Node) (err error) {
func (reporter *Reporter) RecordAudits(ctx context.Context, nodes []*statdb.UpdateRequest) (err error) {
retries := 0
for len(nodes) > 0 && retries < reporter.maxRetries {
res, err := reporter.statdb.UpdateBatch(ctx, &statsproto.UpdateBatchRequest{
res, err := reporter.statdb.UpdateBatch(ctx, &statdb.UpdateBatchRequest{
NodeList: nodes,
})
if err != nil {
@ -48,10 +52,10 @@ func (reporter *Reporter) RecordAudits(ctx context.Context, nodes []*pb.Node) (e
return nil
}
func setAuditFailStatus(ctx context.Context, failedNodes storj.NodeIDList) (failStatusNodes []*pb.Node) {
func setAuditFailStatus(ctx context.Context, failedNodes storj.NodeIDList) (failStatusNodes []*statdb.UpdateRequest) {
for i := range failedNodes {
setNode := &pb.Node{
Id: failedNodes[i],
setNode := &statdb.UpdateRequest{
Node: failedNodes[i],
AuditSuccess: false,
IsUp: true,
UpdateAuditSuccess: true,
@ -63,10 +67,10 @@ func setAuditFailStatus(ctx context.Context, failedNodes storj.NodeIDList) (fail
}
// TODO: offline nodes should maybe be marked as failing the audit in the future
func setOfflineStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (offlineStatusNodes []*pb.Node) {
func setOfflineStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (offlineStatusNodes []*statdb.UpdateRequest) {
for i := range offlineNodeIDs {
setNode := &pb.Node{
Id: offlineNodeIDs[i],
setNode := &statdb.UpdateRequest{
Node: offlineNodeIDs[i],
IsUp: false,
UpdateUptime: true,
}
@ -75,10 +79,10 @@ func setOfflineStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (off
return offlineStatusNodes
}
func setSuccessStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (successStatusNodes []*pb.Node) {
func setSuccessStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (successStatusNodes []*statdb.UpdateRequest) {
for i := range offlineNodeIDs {
setNode := &pb.Node{
Id: offlineNodeIDs[i],
setNode := &statdb.UpdateRequest{
Node: offlineNodeIDs[i],
AuditSuccess: true,
IsUp: true,
UpdateAuditSuccess: true,

View File

@ -11,13 +11,13 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/vivint/infectious"
"gopkg.in/spacemonkeygo/monkit.v2"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/psclient"
"storj.io/storj/pkg/provider"
sdbproto "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
"storj.io/storj/pkg/utils"
@ -208,7 +208,7 @@ func calcPadded(size int64, blockSize int) int64 {
}
// verify downloads shares then verifies the data correctness at the given stripe
func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *pb.Pointer, authorization *pb.SignedMessage) (verifiedNodes []*sdbproto.Node, err error) {
func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *pb.Pointer, authorization *pb.SignedMessage) (verifiedNodes []*statdb.UpdateRequest, err error) {
defer mon.Task()(&ctx)(&err)
shares, nodes, err := verifier.downloader.DownloadShares(ctx, pointer, stripeIndex, authorization)
@ -260,7 +260,7 @@ func getSuccessNodes(ctx context.Context, nodes map[int]*pb.Node, failedNodes, o
}
// setVerifiedNodes creates a combined array of offline nodes, failed audit nodes, and success nodes with their stats set to the statdb proto Node type
func setVerifiedNodes(ctx context.Context, offlineNodes, failedNodes, successNodes storj.NodeIDList) (verifiedNodes []*sdbproto.Node) {
func setVerifiedNodes(ctx context.Context, offlineNodes, failedNodes, successNodes storj.NodeIDList) (verifiedNodes []*statdb.UpdateRequest) {
offlineStatusNodes := setOfflineStatus(ctx, offlineNodes)
failStatusNodes := setAuditFailStatus(ctx, failedNodes)
successStatusNodes := setSuccessStatus(ctx, successNodes)

View File

@ -15,7 +15,6 @@ import (
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/statdb"
statpb "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
@ -27,7 +26,7 @@ type Checker interface {
// Checker contains the information needed to do checks for missing pieces
type checker struct {
statdb *statdb.StatDB
statdb statdb.DB
pointerdb *pointerdb.Server
repairQueue *queue.Queue
overlay pb.OverlayServer
@ -38,7 +37,7 @@ type checker struct {
}
// newChecker creates a new instance of checker
func newChecker(pointerdb *pointerdb.Server, sdb *statdb.StatDB, repairQueue *queue.Queue, overlay pb.OverlayServer, irrdb irreparable.DB, limit int, logger *zap.Logger, interval time.Duration) *checker {
func newChecker(pointerdb *pointerdb.Server, sdb statdb.DB, repairQueue *queue.Queue, overlay pb.OverlayServer, irrdb irreparable.DB, limit int, logger *zap.Logger, interval time.Duration) *checker {
return &checker{
statdb: sdb,
pointerdb: pointerdb,
@ -167,7 +166,7 @@ func (c *checker) offlineNodes(ctx context.Context, nodeIDs storj.NodeIDList) (o
// Find invalidNodes by checking the audit results that are place in statdb
func (c *checker) invalidNodes(ctx context.Context, nodeIDs storj.NodeIDList) (invalidNodes []int32, err error) {
// filter if nodeIDs have invalid pieces from auditing results
findInvalidNodesReq := &statpb.FindInvalidNodesRequest{
findInvalidNodesReq := &statdb.FindInvalidNodesRequest{
NodeIds: nodeIDs,
MaxStats: &pb.NodeStats{
AuditSuccessRatio: 0, // TODO: update when we have stats added to statdb

View File

@ -5,7 +5,6 @@ package checker
import (
"context"
"fmt"
"math/rand"
"sort"
"strconv"
@ -23,7 +22,6 @@ import (
"storj.io/storj/pkg/overlay/mocks"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/satellitedb"
"storj.io/storj/storage/redis"
@ -39,10 +37,6 @@ func TestIdentifyInjuredSegments(t *testing.T) {
pointerdb := pointerdb.NewServer(teststore.New(), &overlay.Cache{}, logger, pointerdb.Config{}, nil)
assert.NotNil(t, pointerdb)
sdb, err := statdb.NewStatDB("sqlite3", fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", rand.Int63()), logger)
assert.NotNil(t, sdb)
assert.NoError(t, err)
repairQueue := queue.NewQueue(testqueue.New())
const N = 25
@ -105,7 +99,7 @@ func TestIdentifyInjuredSegments(t *testing.T) {
}()
err = db.CreateTables()
assert.NoError(t, err)
checker := newChecker(pointerdb, sdb, repairQueue, overlayServer, db.Irreparable(), limit, logger, interval)
checker := newChecker(pointerdb, db.StatDB(), repairQueue, overlayServer, db.Irreparable(), limit, logger, interval)
assert.NoError(t, err)
err = checker.identifyInjuredSegments(ctx)
assert.NoError(t, err)
@ -130,10 +124,6 @@ func TestOfflineNodes(t *testing.T) {
pointerdb := pointerdb.NewServer(teststore.New(), &overlay.Cache{}, logger, pointerdb.Config{}, nil)
assert.NotNil(t, pointerdb)
sdb, err := statdb.NewStatDB("sqlite3", fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", rand.Int63()), logger)
assert.NotNil(t, sdb)
assert.NoError(t, err)
repairQueue := queue.NewQueue(testqueue.New())
const N = 50
nodes := []*pb.Node{}
@ -162,7 +152,7 @@ func TestOfflineNodes(t *testing.T) {
}()
err = db.CreateTables()
assert.NoError(t, err)
checker := newChecker(pointerdb, sdb, repairQueue, overlayServer, db.Irreparable(), limit, logger, interval)
checker := newChecker(pointerdb, db.StatDB(), repairQueue, overlayServer, db.Irreparable(), limit, logger, interval)
assert.NoError(t, err)
offline, err := checker.offlineNodes(ctx, nodeIDs)
assert.NoError(t, err)
@ -174,10 +164,6 @@ func BenchmarkIdentifyInjuredSegments(b *testing.B) {
pointerdb := pointerdb.NewServer(teststore.New(), &overlay.Cache{}, logger, pointerdb.Config{}, nil)
assert.NotNil(b, pointerdb)
sdb, err := statdb.NewStatDB("sqlite3", fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", rand.Int63()), logger)
assert.NotNil(b, sdb)
assert.NoError(b, err)
// creating in-memory db and opening connection
db, err := satellitedb.NewInMemory()
defer func() {
@ -248,7 +234,7 @@ func BenchmarkIdentifyInjuredSegments(b *testing.B) {
for i := 0; i < b.N; i++ {
interval := time.Second
assert.NoError(b, err)
checker := newChecker(pointerdb, sdb, repairQueue, overlayServer, db.Irreparable(), limit, logger, interval)
checker := newChecker(pointerdb, db.StatDB(), repairQueue, overlayServer, db.Irreparable(), limit, logger, interval)
assert.NoError(b, err)
err = checker.identifyInjuredSegments(ctx)

View File

@ -7,7 +7,6 @@ import (
"context"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/datarepair/irreparable"
@ -32,16 +31,18 @@ func (c Config) initialize(ctx context.Context) (Checker, error) {
return nil, Error.New("failed to load pointerdb from context")
}
sdb := statdb.LoadFromContext(ctx)
if sdb == nil {
return nil, Error.New("failed to load statdb from context")
sdb, ok := ctx.Value("masterdb").(interface {
StatDB() statdb.DB
})
if !ok {
return nil, Error.New("unable to get master db instance")
}
db, ok := ctx.Value("masterdb").(interface {
Irreparable() irreparable.DB
})
if !ok {
return nil, errs.New("unable to get master db instance")
return nil, Error.New("unable to get master db instance")
}
o := overlay.LoadServerFromContext(ctx)
redisQ, err := redis.NewQueueFrom(c.QueueAddress)
@ -49,7 +50,7 @@ func (c Config) initialize(ctx context.Context) (Checker, error) {
return nil, Error.Wrap(err)
}
repairQueue := queue.NewQueue(redisQ)
return newChecker(pdb, sdb, repairQueue, o, db.Irreparable(), 0, zap.L(), c.Interval), nil
return newChecker(pdb, sdb.StatDB(), repairQueue, o, db.Irreparable(), 0, zap.L(), c.Interval), nil
}
// Run runs the checker with configured values

View File

@ -45,8 +45,13 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (err error)
ol := overlay.LoadFromContext(ctx)
kad := kademlia.LoadFromContext(ctx)
stat := statdb.LoadFromContext(ctx)
discovery := NewDiscovery(ol, kad, stat)
stat, ok := ctx.Value("masterdb").(interface {
StatDB() statdb.DB
})
if !ok {
return Error.New("unable to get master db instance")
}
discovery := NewDiscovery(ol, kad, stat.StatDB())
zap.L().Debug("Starting discovery")

View File

@ -15,11 +15,11 @@ import (
type Discovery struct {
cache *overlay.Cache
kad *kademlia.Kademlia
statdb *statdb.StatDB
statdb statdb.DB
}
// NewDiscovery Returns a new Discovery instance with cache, kad, and statdb loaded on
func NewDiscovery(ol *overlay.Cache, kad *kademlia.Kademlia, stat *statdb.StatDB) *Discovery {
func NewDiscovery(ol *overlay.Cache, kad *kademlia.Kademlia, stat statdb.DB) *Discovery {
return &Discovery{
cache: ol,
kad: kad,

View File

@ -8,7 +8,7 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"gopkg.in/spacemonkeygo/monkit.v2"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/overlay"
@ -42,9 +42,11 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (err error)
return Error.New("programmer error: overlay responsibility unstarted")
}
sdb := statdb.LoadFromContext(ctx)
if sdb == nil {
return Error.New("programmer error: statdb responsibility unstarted")
sdb, ok := ctx.Value("masterdb").(interface {
StatDB() statdb.DB
})
if !ok {
return Error.New("unable to get master db instance")
}
id, err := provider.NewFullIdentity(ctx, 12, 4)
@ -56,7 +58,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (err error)
dht: kad,
identity: id,
cache: ol,
statdb: sdb,
statdb: sdb.StatDB(),
logger: zap.L(),
metrics: monkit.Default,
}

View File

@ -16,7 +16,6 @@ import (
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/statdb"
statsproto "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/storj"
)
@ -29,7 +28,7 @@ var (
type Server struct {
dht dht.DHT
cache *overlay.Cache
statdb *statdb.StatDB
statdb statdb.DB
logger *zap.Logger
metrics *monkit.Registry
identity *provider.FullIdentity
@ -148,8 +147,8 @@ func (srv *Server) LookupNode(ctx context.Context, req *pb.LookupNodeRequest) (*
// GetStats returns the stats for a particular node ID
func (srv *Server) GetStats(ctx context.Context, req *pb.GetStatsRequest) (*pb.GetStatsResponse, error) {
getReq := &statsproto.GetRequest{
NodeId: req.NodeId,
getReq := &statdb.GetRequest{
Node: req.NodeId,
}
res, err := srv.statdb.Get(ctx, getReq)
if err != nil {
@ -166,17 +165,14 @@ func (srv *Server) GetStats(ctx context.Context, req *pb.GetStatsRequest) (*pb.G
// CreateStats creates a node with specified stats
func (srv *Server) CreateStats(ctx context.Context, req *pb.CreateStatsRequest) (*pb.CreateStatsResponse, error) {
node := &statsproto.Node{
Id: req.NodeId,
}
stats := &statsproto.NodeStats{
stats := &pb.NodeStats{
AuditCount: req.AuditCount,
AuditSuccessCount: req.AuditSuccessCount,
UptimeCount: req.UptimeCount,
UptimeSuccessCount: req.UptimeSuccessCount,
}
createReq := &statsproto.CreateRequest{
Node: node,
createReq := &statdb.CreateRequest{
Node: req.NodeId,
Stats: stats,
}
_, err := srv.statdb.Create(ctx, createReq)

View File

@ -12,7 +12,6 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/statdb"
statproto "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
@ -35,11 +34,11 @@ var OverlayError = errs.Class("Overlay Error")
type Cache struct {
DB storage.KeyValueStore
DHT dht.DHT
StatDB *statdb.StatDB
StatDB statdb.DB
}
// NewOverlayCache returns a new Cache
func NewOverlayCache(db storage.KeyValueStore, dht dht.DHT, sdb *statdb.StatDB) *Cache {
func NewOverlayCache(db storage.KeyValueStore, dht dht.DHT, sdb statdb.DB) *Cache {
return &Cache{DB: db, DHT: dht, StatDB: sdb}
}
@ -98,10 +97,8 @@ func (o *Cache) Put(ctx context.Context, nodeID storj.NodeID, value pb.Node) err
}
// get existing node rep, or create a new statdb node with 0 rep
res, err := o.StatDB.CreateEntryIfNotExists(ctx, &statproto.CreateEntryIfNotExistsRequest{
Node: &pb.Node{
Id: nodeID,
},
res, err := o.StatDB.CreateEntryIfNotExists(ctx, &statdb.CreateEntryIfNotExistsRequest{
Node: nodeID,
})
if err != nil {
return err

View File

@ -15,6 +15,7 @@ import (
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/satellitedb"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis"
@ -29,7 +30,7 @@ var (
invalid2ID = teststorj.NodeIDFromString("invalid2")
)
func testCache(ctx context.Context, t *testing.T, store storage.KeyValueStore, sdb *statdb.StatDB) {
func testCache(ctx context.Context, t *testing.T, store storage.KeyValueStore, sdb statdb.DB) {
cache := overlay.Cache{DB: store, StatDB: sdb}
{ // Put
@ -145,7 +146,11 @@ func TestCache_Store(t *testing.T) {
}
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
sdb := planet.Satellites[0].StatDB
testCache(ctx, t, teststore.New(), sdb)
sdb, err := satellitedb.NewInMemory()
if err != nil {
t.Fatal(err)
}
testCache(ctx, t, teststore.New(), sdb.StatDB())
}

View File

@ -61,9 +61,11 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
return Error.New("programmer error: kademlia responsibility unstarted")
}
sdb := statdb.LoadFromContext(ctx)
if sdb == nil {
return Error.New("programmer error: statdb responsibility unstarted")
sdb, ok := ctx.Value("masterdb").(interface {
StatDB() statdb.DB
})
if !ok {
return Error.New("unable to get master db instance")
}
driver, source, err := utils.SplitDBURL(c.DatabaseURL)
@ -90,7 +92,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
return Error.New("database scheme not supported: %s", driver)
}
cache := NewOverlayCache(db, kad, sdb)
cache := NewOverlayCache(db, kad, sdb.StatDB())
srv := NewServer(zap.L(), cache, kad)
pb.RegisterOverlayServer(server.GRPC(), srv)

View File

@ -8,7 +8,6 @@ import (
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/statdb"
)
func TestRun(t *testing.T) {
@ -18,10 +17,6 @@ func TestRun(t *testing.T) {
var kadKey kademlia.CtxKey
ctx := context.WithValue(bctx, kadKey, kad)
sdb := &statdb.StatDB{}
var statKey statdb.CtxKey
ctx = context.WithValue(ctx, statKey, sdb)
// run with nil
err := Config{}.Run(context.Background(), nil)
assert.Error(t, err)
@ -30,13 +25,12 @@ func TestRun(t *testing.T) {
// run with nil, pass pointer to Kademlia in context
err = Config{}.Run(ctx, nil)
assert.Error(t, err)
assert.Equal(t, "overlay error: Could not parse DB URL ", err.Error())
assert.Equal(t, "overlay error: unable to get master db instance", err.Error())
// db scheme redis conn fail
err = Config{DatabaseURL: "redis://somedir/overlay.db/?db=1"}.Run(ctx, nil)
assert.Error(t, err)
assert.Equal(t, "redis error: ping failed: dial tcp: address somedir: missing port in address", err.Error())
// db scheme bolt conn fail
err = Config{DatabaseURL: "bolt://somedir/overlay.db"}.Run(ctx, nil)

View File

@ -7,8 +7,10 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -41,7 +43,7 @@ func (x AgreementsSummary_Status) String() string {
return proto.EnumName(AgreementsSummary_Status_name, int32(x))
}
func (AgreementsSummary_Status) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_bandwidth_01db992f91c47bae, []int{0, 0}
return fileDescriptor_bandwidth_acc7ad1b0a6a13c6, []int{0, 0}
}
type AgreementsSummary struct {
@ -55,7 +57,7 @@ func (m *AgreementsSummary) Reset() { *m = AgreementsSummary{} }
func (m *AgreementsSummary) String() string { return proto.CompactTextString(m) }
func (*AgreementsSummary) ProtoMessage() {}
func (*AgreementsSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_bandwidth_01db992f91c47bae, []int{0}
return fileDescriptor_bandwidth_acc7ad1b0a6a13c6, []int{0}
}
func (m *AgreementsSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AgreementsSummary.Unmarshal(m, b)
@ -95,8 +97,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Bandwidth service
// BandwidthClient is the client API for Bandwidth service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BandwidthClient interface {
BandwidthAgreements(ctx context.Context, in *RenterBandwidthAllocation, opts ...grpc.CallOption) (*AgreementsSummary, error)
}
@ -118,8 +121,7 @@ func (c *bandwidthClient) BandwidthAgreements(ctx context.Context, in *RenterBan
return out, nil
}
// Server API for Bandwidth service
// BandwidthServer is the server API for Bandwidth service.
type BandwidthServer interface {
BandwidthAgreements(context.Context, *RenterBandwidthAllocation) (*AgreementsSummary, error)
}
@ -159,9 +161,9 @@ var _Bandwidth_serviceDesc = grpc.ServiceDesc{
Metadata: "bandwidth.proto",
}
func init() { proto.RegisterFile("bandwidth.proto", fileDescriptor_bandwidth_01db992f91c47bae) }
func init() { proto.RegisterFile("bandwidth.proto", fileDescriptor_bandwidth_acc7ad1b0a6a13c6) }
var fileDescriptor_bandwidth_01db992f91c47bae = []byte{
var fileDescriptor_bandwidth_acc7ad1b0a6a13c6 = []byte{
// 196 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4f, 0x4a, 0xcc, 0x4b,
0x29, 0xcf, 0x4c, 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x0b, 0x48,

View File

@ -31,7 +31,7 @@ func (m *InjuredSegment) Reset() { *m = InjuredSegment{} }
func (m *InjuredSegment) String() string { return proto.CompactTextString(m) }
func (*InjuredSegment) ProtoMessage() {}
func (*InjuredSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_datarepair_13e4beab54f194bd, []int{0}
return fileDescriptor_datarepair_ed86aa1a63e3d6e4, []int{0}
}
func (m *InjuredSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InjuredSegment.Unmarshal(m, b)
@ -69,9 +69,9 @@ func init() {
proto.RegisterType((*InjuredSegment)(nil), "repair.InjuredSegment")
}
func init() { proto.RegisterFile("datarepair.proto", fileDescriptor_datarepair_13e4beab54f194bd) }
func init() { proto.RegisterFile("datarepair.proto", fileDescriptor_datarepair_ed86aa1a63e3d6e4) }
var fileDescriptor_datarepair_13e4beab54f194bd = []byte{
var fileDescriptor_datarepair_ed86aa1a63e3d6e4 = []byte{
// 119 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x49, 0x2c, 0x49,
0x2c, 0x4a, 0x2d, 0x48, 0xcc, 0x2c, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0,

View File

@ -8,8 +8,10 @@ import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -34,7 +36,7 @@ func (m *GetStatsRequest) Reset() { *m = GetStatsRequest{} }
func (m *GetStatsRequest) String() string { return proto.CompactTextString(m) }
func (*GetStatsRequest) ProtoMessage() {}
func (*GetStatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{0}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{0}
}
func (m *GetStatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetStatsRequest.Unmarshal(m, b)
@ -68,7 +70,7 @@ func (m *GetStatsResponse) Reset() { *m = GetStatsResponse{} }
func (m *GetStatsResponse) String() string { return proto.CompactTextString(m) }
func (*GetStatsResponse) ProtoMessage() {}
func (*GetStatsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{1}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{1}
}
func (m *GetStatsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetStatsResponse.Unmarshal(m, b)
@ -132,7 +134,7 @@ func (m *CreateStatsRequest) Reset() { *m = CreateStatsRequest{} }
func (m *CreateStatsRequest) String() string { return proto.CompactTextString(m) }
func (*CreateStatsRequest) ProtoMessage() {}
func (*CreateStatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{2}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{2}
}
func (m *CreateStatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateStatsRequest.Unmarshal(m, b)
@ -190,7 +192,7 @@ func (m *CreateStatsResponse) Reset() { *m = CreateStatsResponse{} }
func (m *CreateStatsResponse) String() string { return proto.CompactTextString(m) }
func (*CreateStatsResponse) ProtoMessage() {}
func (*CreateStatsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{3}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{3}
}
func (m *CreateStatsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateStatsResponse.Unmarshal(m, b)
@ -223,7 +225,7 @@ func (m *CountNodesResponse) Reset() { *m = CountNodesResponse{} }
func (m *CountNodesResponse) String() string { return proto.CompactTextString(m) }
func (*CountNodesResponse) ProtoMessage() {}
func (*CountNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{4}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{4}
}
func (m *CountNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesResponse.Unmarshal(m, b)
@ -267,7 +269,7 @@ func (m *CountNodesRequest) Reset() { *m = CountNodesRequest{} }
func (m *CountNodesRequest) String() string { return proto.CompactTextString(m) }
func (*CountNodesRequest) ProtoMessage() {}
func (*CountNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{5}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{5}
}
func (m *CountNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesRequest.Unmarshal(m, b)
@ -298,7 +300,7 @@ func (m *GetBucketsRequest) Reset() { *m = GetBucketsRequest{} }
func (m *GetBucketsRequest) String() string { return proto.CompactTextString(m) }
func (*GetBucketsRequest) ProtoMessage() {}
func (*GetBucketsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{6}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{6}
}
func (m *GetBucketsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketsRequest.Unmarshal(m, b)
@ -330,7 +332,7 @@ func (m *GetBucketsResponse) Reset() { *m = GetBucketsResponse{} }
func (m *GetBucketsResponse) String() string { return proto.CompactTextString(m) }
func (*GetBucketsResponse) ProtoMessage() {}
func (*GetBucketsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{7}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{7}
}
func (m *GetBucketsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketsResponse.Unmarshal(m, b)
@ -369,7 +371,7 @@ func (m *GetBucketRequest) Reset() { *m = GetBucketRequest{} }
func (m *GetBucketRequest) String() string { return proto.CompactTextString(m) }
func (*GetBucketRequest) ProtoMessage() {}
func (*GetBucketRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{8}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{8}
}
func (m *GetBucketRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketRequest.Unmarshal(m, b)
@ -401,7 +403,7 @@ func (m *GetBucketResponse) Reset() { *m = GetBucketResponse{} }
func (m *GetBucketResponse) String() string { return proto.CompactTextString(m) }
func (*GetBucketResponse) ProtoMessage() {}
func (*GetBucketResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{9}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{9}
}
func (m *GetBucketResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketResponse.Unmarshal(m, b)
@ -439,7 +441,7 @@ func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{10}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{10}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
@ -477,7 +479,7 @@ func (m *BucketList) Reset() { *m = BucketList{} }
func (m *BucketList) String() string { return proto.CompactTextString(m) }
func (*BucketList) ProtoMessage() {}
func (*BucketList) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{11}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{11}
}
func (m *BucketList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BucketList.Unmarshal(m, b)
@ -517,7 +519,7 @@ func (m *PingNodeRequest) Reset() { *m = PingNodeRequest{} }
func (m *PingNodeRequest) String() string { return proto.CompactTextString(m) }
func (*PingNodeRequest) ProtoMessage() {}
func (*PingNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{12}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{12}
}
func (m *PingNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingNodeRequest.Unmarshal(m, b)
@ -555,7 +557,7 @@ func (m *PingNodeResponse) Reset() { *m = PingNodeResponse{} }
func (m *PingNodeResponse) String() string { return proto.CompactTextString(m) }
func (*PingNodeResponse) ProtoMessage() {}
func (*PingNodeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{13}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{13}
}
func (m *PingNodeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingNodeResponse.Unmarshal(m, b)
@ -594,7 +596,7 @@ func (m *LookupNodeRequest) Reset() { *m = LookupNodeRequest{} }
func (m *LookupNodeRequest) String() string { return proto.CompactTextString(m) }
func (*LookupNodeRequest) ProtoMessage() {}
func (*LookupNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{14}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{14}
}
func (m *LookupNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupNodeRequest.Unmarshal(m, b)
@ -640,7 +642,7 @@ func (m *LookupNodeResponse) Reset() { *m = LookupNodeResponse{} }
func (m *LookupNodeResponse) String() string { return proto.CompactTextString(m) }
func (*LookupNodeResponse) ProtoMessage() {}
func (*LookupNodeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_4d95b6ca0d033b47, []int{15}
return fileDescriptor_inspector_efccc2bc3c2c22f4, []int{15}
}
func (m *LookupNodeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupNodeResponse.Unmarshal(m, b)
@ -701,8 +703,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Inspector service
// InspectorClient is the client API for Inspector service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type InspectorClient interface {
// Kad/Overlay commands:
// CountNodes returns the number of nodes in the cache and in the routing table
@ -793,8 +796,7 @@ func (c *inspectorClient) CreateStats(ctx context.Context, in *CreateStatsReques
return out, nil
}
// Server API for Inspector service
// InspectorServer is the server API for Inspector service.
type InspectorServer interface {
// Kad/Overlay commands:
// CountNodes returns the number of nodes in the cache and in the routing table
@ -981,9 +983,9 @@ var _Inspector_serviceDesc = grpc.ServiceDesc{
Metadata: "inspector.proto",
}
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_inspector_4d95b6ca0d033b47) }
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_inspector_efccc2bc3c2c22f4) }
var fileDescriptor_inspector_4d95b6ca0d033b47 = []byte{
var fileDescriptor_inspector_efccc2bc3c2c22f4 = []byte{
// 636 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdd, 0x8e, 0xd2, 0x40,
0x18, 0xb5, 0xa5, 0xcb, 0xc2, 0x07, 0x81, 0x65, 0x58, 0x13, 0x52, 0x70, 0xc1, 0xb9, 0x50, 0xe2,

View File

@ -31,7 +31,7 @@ func (m *SerializableMeta) Reset() { *m = SerializableMeta{} }
func (m *SerializableMeta) String() string { return proto.CompactTextString(m) }
func (*SerializableMeta) ProtoMessage() {}
func (*SerializableMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_12afffbb4ed1a0bb, []int{0}
return fileDescriptor_meta_16703f2dafac4e74, []int{0}
}
func (m *SerializableMeta) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SerializableMeta.Unmarshal(m, b)
@ -70,9 +70,9 @@ func init() {
proto.RegisterMapType((map[string]string)(nil), "objects.SerializableMeta.UserDefinedEntry")
}
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_12afffbb4ed1a0bb) }
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_16703f2dafac4e74) }
var fileDescriptor_meta_12afffbb4ed1a0bb = []byte{
var fileDescriptor_meta_16703f2dafac4e74 = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xca, 0x4d, 0x2d, 0x49,
0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcf, 0x4f, 0xca, 0x4a, 0x4d, 0x2e, 0x29, 0x56,

View File

@ -40,7 +40,7 @@ func (x NodeType) String() string {
return proto.EnumName(NodeType_name, int32(x))
}
func (NodeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{0}
return fileDescriptor_node_e8dcfefe0066c005, []int{0}
}
// NodeTransport is an enum of possible transports for the overlay network
@ -61,7 +61,7 @@ func (x NodeTransport) String() string {
return proto.EnumName(NodeTransport_name, int32(x))
}
func (NodeTransport) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{1}
return fileDescriptor_node_e8dcfefe0066c005, []int{1}
}
// NodeRestrictions contains all relevant data about a nodes ability to store data
@ -77,7 +77,7 @@ func (m *NodeRestrictions) Reset() { *m = NodeRestrictions{} }
func (m *NodeRestrictions) String() string { return proto.CompactTextString(m) }
func (*NodeRestrictions) ProtoMessage() {}
func (*NodeRestrictions) Descriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{0}
return fileDescriptor_node_e8dcfefe0066c005, []int{0}
}
func (m *NodeRestrictions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRestrictions.Unmarshal(m, b)
@ -136,7 +136,7 @@ func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{1}
return fileDescriptor_node_e8dcfefe0066c005, []int{1}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Node.Unmarshal(m, b)
@ -246,7 +246,7 @@ func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (m *NodeAddress) String() string { return proto.CompactTextString(m) }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{2}
return fileDescriptor_node_e8dcfefe0066c005, []int{2}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeAddress.Unmarshal(m, b)
@ -299,7 +299,7 @@ func (m *NodeStats) Reset() { *m = NodeStats{} }
func (m *NodeStats) String() string { return proto.CompactTextString(m) }
func (*NodeStats) ProtoMessage() {}
func (*NodeStats) Descriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{3}
return fileDescriptor_node_e8dcfefe0066c005, []int{3}
}
func (m *NodeStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeStats.Unmarshal(m, b)
@ -380,7 +380,7 @@ func (m *NodeMetadata) Reset() { *m = NodeMetadata{} }
func (m *NodeMetadata) String() string { return proto.CompactTextString(m) }
func (*NodeMetadata) ProtoMessage() {}
func (*NodeMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_node_9c604679ec4520fa, []int{4}
return fileDescriptor_node_e8dcfefe0066c005, []int{4}
}
func (m *NodeMetadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeMetadata.Unmarshal(m, b)
@ -424,9 +424,9 @@ func init() {
proto.RegisterEnum("node.NodeTransport", NodeTransport_name, NodeTransport_value)
}
func init() { proto.RegisterFile("node.proto", fileDescriptor_node_9c604679ec4520fa) }
func init() { proto.RegisterFile("node.proto", fileDescriptor_node_e8dcfefe0066c005) }
var fileDescriptor_node_9c604679ec4520fa = []byte{
var fileDescriptor_node_e8dcfefe0066c005 = []byte{
// 620 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x94, 0xcf, 0x4e, 0xdb, 0x4a,
0x14, 0xc6, 0x49, 0xe2, 0xfc, 0xf1, 0xb1, 0x93, 0x1b, 0x0e, 0x08, 0x59, 0xf7, 0xea, 0x5e, 0x82,

View File

@ -9,8 +9,10 @@ import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import duration "github.com/golang/protobuf/ptypes/duration"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -52,7 +54,7 @@ func (x Restriction_Operator) String() string {
return proto.EnumName(Restriction_Operator_name, int32(x))
}
func (Restriction_Operator) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{11, 0}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{11, 0}
}
type Restriction_Operand int32
@ -75,7 +77,7 @@ func (x Restriction_Operand) String() string {
return proto.EnumName(Restriction_Operand_name, int32(x))
}
func (Restriction_Operand) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{11, 1}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{11, 1}
}
// LookupRequest is is request message for the lookup rpc call
@ -90,7 +92,7 @@ func (m *LookupRequest) Reset() { *m = LookupRequest{} }
func (m *LookupRequest) String() string { return proto.CompactTextString(m) }
func (*LookupRequest) ProtoMessage() {}
func (*LookupRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{0}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{0}
}
func (m *LookupRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupRequest.Unmarshal(m, b)
@ -122,7 +124,7 @@ func (m *LookupResponse) Reset() { *m = LookupResponse{} }
func (m *LookupResponse) String() string { return proto.CompactTextString(m) }
func (*LookupResponse) ProtoMessage() {}
func (*LookupResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{1}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{1}
}
func (m *LookupResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupResponse.Unmarshal(m, b)
@ -161,7 +163,7 @@ func (m *LookupRequests) Reset() { *m = LookupRequests{} }
func (m *LookupRequests) String() string { return proto.CompactTextString(m) }
func (*LookupRequests) ProtoMessage() {}
func (*LookupRequests) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{2}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{2}
}
func (m *LookupRequests) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupRequests.Unmarshal(m, b)
@ -200,7 +202,7 @@ func (m *LookupResponses) Reset() { *m = LookupResponses{} }
func (m *LookupResponses) String() string { return proto.CompactTextString(m) }
func (*LookupResponses) ProtoMessage() {}
func (*LookupResponses) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{3}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{3}
}
func (m *LookupResponses) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupResponses.Unmarshal(m, b)
@ -239,7 +241,7 @@ func (m *FindStorageNodesResponse) Reset() { *m = FindStorageNodesRespon
func (m *FindStorageNodesResponse) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesResponse) ProtoMessage() {}
func (*FindStorageNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{4}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{4}
}
func (m *FindStorageNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindStorageNodesResponse.Unmarshal(m, b)
@ -282,7 +284,7 @@ func (m *FindStorageNodesRequest) Reset() { *m = FindStorageNodesRequest
func (m *FindStorageNodesRequest) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesRequest) ProtoMessage() {}
func (*FindStorageNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{5}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{5}
}
func (m *FindStorageNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindStorageNodesRequest.Unmarshal(m, b)
@ -347,7 +349,7 @@ func (m *OverlayOptions) Reset() { *m = OverlayOptions{} }
func (m *OverlayOptions) String() string { return proto.CompactTextString(m) }
func (*OverlayOptions) ProtoMessage() {}
func (*OverlayOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{6}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{6}
}
func (m *OverlayOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OverlayOptions.Unmarshal(m, b)
@ -416,7 +418,7 @@ func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{7}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{7}
}
func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryRequest.Unmarshal(m, b)
@ -476,7 +478,7 @@ func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (m *QueryResponse) String() string { return proto.CompactTextString(m) }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{8}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{8}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryResponse.Unmarshal(m, b)
@ -520,7 +522,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} }
func (m *PingRequest) String() string { return proto.CompactTextString(m) }
func (*PingRequest) ProtoMessage() {}
func (*PingRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{9}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{9}
}
func (m *PingRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingRequest.Unmarshal(m, b)
@ -550,7 +552,7 @@ func (m *PingResponse) Reset() { *m = PingResponse{} }
func (m *PingResponse) String() string { return proto.CompactTextString(m) }
func (*PingResponse) ProtoMessage() {}
func (*PingResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{10}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{10}
}
func (m *PingResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingResponse.Unmarshal(m, b)
@ -583,7 +585,7 @@ func (m *Restriction) Reset() { *m = Restriction{} }
func (m *Restriction) String() string { return proto.CompactTextString(m) }
func (*Restriction) ProtoMessage() {}
func (*Restriction) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_99a51b72c26b0776, []int{11}
return fileDescriptor_overlay_55263e8e6ceef0b4, []int{11}
}
func (m *Restriction) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Restriction.Unmarshal(m, b)
@ -649,8 +651,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Overlay service
// OverlayClient is the client API for Overlay service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type OverlayClient interface {
// Lookup finds a nodes address from the network
Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error)
@ -695,8 +698,7 @@ func (c *overlayClient) FindStorageNodes(ctx context.Context, in *FindStorageNod
return out, nil
}
// Server API for Overlay service
// OverlayServer is the server API for Overlay service.
type OverlayServer interface {
// Lookup finds a nodes address from the network
Lookup(context.Context, *LookupRequest) (*LookupResponse, error)
@ -785,8 +787,9 @@ var _Overlay_serviceDesc = grpc.ServiceDesc{
Metadata: "overlay.proto",
}
// Client API for Nodes service
// NodesClient is the client API for Nodes service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type NodesClient interface {
Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error)
Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error)
@ -818,8 +821,7 @@ func (c *nodesClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.Ca
return out, nil
}
// Server API for Nodes service
// NodesServer is the server API for Nodes service.
type NodesServer interface {
Query(context.Context, *QueryRequest) (*QueryResponse, error)
Ping(context.Context, *PingRequest) (*PingResponse, error)
@ -882,9 +884,9 @@ var _Nodes_serviceDesc = grpc.ServiceDesc{
Metadata: "overlay.proto",
}
func init() { proto.RegisterFile("overlay.proto", fileDescriptor_overlay_99a51b72c26b0776) }
func init() { proto.RegisterFile("overlay.proto", fileDescriptor_overlay_55263e8e6ceef0b4) }
var fileDescriptor_overlay_99a51b72c26b0776 = []byte{
var fileDescriptor_overlay_55263e8e6ceef0b4 = []byte{
// 845 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x8e, 0xdb, 0x44,
0x14, 0x5e, 0xe7, 0xc7, 0xc9, 0x9e, 0x24, 0x5e, 0x6b, 0xd4, 0xee, 0x06, 0x03, 0xdd, 0x60, 0x55,

View File

@ -8,8 +8,10 @@ import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -42,7 +44,7 @@ func (x PayerBandwidthAllocation_Action) String() string {
return proto.EnumName(PayerBandwidthAllocation_Action_name, int32(x))
}
func (PayerBandwidthAllocation_Action) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{0, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{0, 0}
}
type PayerBandwidthAllocation struct {
@ -57,7 +59,7 @@ func (m *PayerBandwidthAllocation) Reset() { *m = PayerBandwidthAllocati
func (m *PayerBandwidthAllocation) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocation) ProtoMessage() {}
func (*PayerBandwidthAllocation) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{0}
}
func (m *PayerBandwidthAllocation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocation.Unmarshal(m, b)
@ -108,7 +110,7 @@ func (m *PayerBandwidthAllocation_Data) Reset() { *m = PayerBandwidthAll
func (m *PayerBandwidthAllocation_Data) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocation_Data) ProtoMessage() {}
func (*PayerBandwidthAllocation_Data) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{0, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{0, 0}
}
func (m *PayerBandwidthAllocation_Data) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocation_Data.Unmarshal(m, b)
@ -175,7 +177,7 @@ func (m *RenterBandwidthAllocation) Reset() { *m = RenterBandwidthAlloca
func (m *RenterBandwidthAllocation) String() string { return proto.CompactTextString(m) }
func (*RenterBandwidthAllocation) ProtoMessage() {}
func (*RenterBandwidthAllocation) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{1}
return fileDescriptor_piecestore_c22028cf5808832f, []int{1}
}
func (m *RenterBandwidthAllocation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenterBandwidthAllocation.Unmarshal(m, b)
@ -223,7 +225,7 @@ func (m *RenterBandwidthAllocation_Data) Reset() { *m = RenterBandwidthA
func (m *RenterBandwidthAllocation_Data) String() string { return proto.CompactTextString(m) }
func (*RenterBandwidthAllocation_Data) ProtoMessage() {}
func (*RenterBandwidthAllocation_Data) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{1, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{1, 0}
}
func (m *RenterBandwidthAllocation_Data) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenterBandwidthAllocation_Data.Unmarshal(m, b)
@ -277,7 +279,7 @@ func (m *PieceStore) Reset() { *m = PieceStore{} }
func (m *PieceStore) String() string { return proto.CompactTextString(m) }
func (*PieceStore) ProtoMessage() {}
func (*PieceStore) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{2}
return fileDescriptor_piecestore_c22028cf5808832f, []int{2}
}
func (m *PieceStore) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore.Unmarshal(m, b)
@ -332,7 +334,7 @@ func (m *PieceStore_PieceData) Reset() { *m = PieceStore_PieceData{} }
func (m *PieceStore_PieceData) String() string { return proto.CompactTextString(m) }
func (*PieceStore_PieceData) ProtoMessage() {}
func (*PieceStore_PieceData) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{2, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{2, 0}
}
func (m *PieceStore_PieceData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore_PieceData.Unmarshal(m, b)
@ -386,7 +388,7 @@ func (m *PieceId) Reset() { *m = PieceId{} }
func (m *PieceId) String() string { return proto.CompactTextString(m) }
func (*PieceId) ProtoMessage() {}
func (*PieceId) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{3}
return fileDescriptor_piecestore_c22028cf5808832f, []int{3}
}
func (m *PieceId) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceId.Unmarshal(m, b)
@ -433,7 +435,7 @@ func (m *PieceSummary) Reset() { *m = PieceSummary{} }
func (m *PieceSummary) String() string { return proto.CompactTextString(m) }
func (*PieceSummary) ProtoMessage() {}
func (*PieceSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{4}
return fileDescriptor_piecestore_c22028cf5808832f, []int{4}
}
func (m *PieceSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceSummary.Unmarshal(m, b)
@ -487,7 +489,7 @@ func (m *PieceRetrieval) Reset() { *m = PieceRetrieval{} }
func (m *PieceRetrieval) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval) ProtoMessage() {}
func (*PieceRetrieval) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{5}
return fileDescriptor_piecestore_c22028cf5808832f, []int{5}
}
func (m *PieceRetrieval) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval.Unmarshal(m, b)
@ -542,7 +544,7 @@ func (m *PieceRetrieval_PieceData) Reset() { *m = PieceRetrieval_PieceDa
func (m *PieceRetrieval_PieceData) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval_PieceData) ProtoMessage() {}
func (*PieceRetrieval_PieceData) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{5, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{5, 0}
}
func (m *PieceRetrieval_PieceData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval_PieceData.Unmarshal(m, b)
@ -595,7 +597,7 @@ func (m *PieceRetrievalStream) Reset() { *m = PieceRetrievalStream{} }
func (m *PieceRetrievalStream) String() string { return proto.CompactTextString(m) }
func (*PieceRetrievalStream) ProtoMessage() {}
func (*PieceRetrievalStream) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{6}
return fileDescriptor_piecestore_c22028cf5808832f, []int{6}
}
func (m *PieceRetrievalStream) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrievalStream.Unmarshal(m, b)
@ -642,7 +644,7 @@ func (m *PieceDelete) Reset() { *m = PieceDelete{} }
func (m *PieceDelete) String() string { return proto.CompactTextString(m) }
func (*PieceDelete) ProtoMessage() {}
func (*PieceDelete) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{7}
return fileDescriptor_piecestore_c22028cf5808832f, []int{7}
}
func (m *PieceDelete) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDelete.Unmarshal(m, b)
@ -687,7 +689,7 @@ func (m *PieceDeleteSummary) Reset() { *m = PieceDeleteSummary{} }
func (m *PieceDeleteSummary) String() string { return proto.CompactTextString(m) }
func (*PieceDeleteSummary) ProtoMessage() {}
func (*PieceDeleteSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{8}
return fileDescriptor_piecestore_c22028cf5808832f, []int{8}
}
func (m *PieceDeleteSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDeleteSummary.Unmarshal(m, b)
@ -726,7 +728,7 @@ func (m *PieceStoreSummary) Reset() { *m = PieceStoreSummary{} }
func (m *PieceStoreSummary) String() string { return proto.CompactTextString(m) }
func (*PieceStoreSummary) ProtoMessage() {}
func (*PieceStoreSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{9}
return fileDescriptor_piecestore_c22028cf5808832f, []int{9}
}
func (m *PieceStoreSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStoreSummary.Unmarshal(m, b)
@ -770,7 +772,7 @@ func (m *StatsReq) Reset() { *m = StatsReq{} }
func (m *StatsReq) String() string { return proto.CompactTextString(m) }
func (*StatsReq) ProtoMessage() {}
func (*StatsReq) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{10}
return fileDescriptor_piecestore_c22028cf5808832f, []int{10}
}
func (m *StatsReq) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatsReq.Unmarshal(m, b)
@ -804,7 +806,7 @@ func (m *StatSummary) Reset() { *m = StatSummary{} }
func (m *StatSummary) String() string { return proto.CompactTextString(m) }
func (*StatSummary) ProtoMessage() {}
func (*StatSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{11}
return fileDescriptor_piecestore_c22028cf5808832f, []int{11}
}
func (m *StatSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatSummary.Unmarshal(m, b)
@ -865,7 +867,7 @@ func (m *SignedMessage) Reset() { *m = SignedMessage{} }
func (m *SignedMessage) String() string { return proto.CompactTextString(m) }
func (*SignedMessage) ProtoMessage() {}
func (*SignedMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_f6b209c85f045534, []int{12}
return fileDescriptor_piecestore_c22028cf5808832f, []int{12}
}
func (m *SignedMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SignedMessage.Unmarshal(m, b)
@ -935,8 +937,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for PieceStoreRoutes service
// PieceStoreRoutesClient is the client API for PieceStoreRoutes service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type PieceStoreRoutesClient interface {
Piece(ctx context.Context, in *PieceId, opts ...grpc.CallOption) (*PieceSummary, error)
Retrieve(ctx context.Context, opts ...grpc.CallOption) (PieceStoreRoutes_RetrieveClient, error)
@ -1045,8 +1048,7 @@ func (c *pieceStoreRoutesClient) Stats(ctx context.Context, in *StatsReq, opts .
return out, nil
}
// Server API for PieceStoreRoutes service
// PieceStoreRoutesServer is the server API for PieceStoreRoutes service.
type PieceStoreRoutesServer interface {
Piece(context.Context, *PieceId) (*PieceSummary, error)
Retrieve(PieceStoreRoutes_RetrieveServer) error
@ -1198,9 +1200,9 @@ var _PieceStoreRoutes_serviceDesc = grpc.ServiceDesc{
Metadata: "piecestore.proto",
}
func init() { proto.RegisterFile("piecestore.proto", fileDescriptor_piecestore_f6b209c85f045534) }
func init() { proto.RegisterFile("piecestore.proto", fileDescriptor_piecestore_c22028cf5808832f) }
var fileDescriptor_piecestore_f6b209c85f045534 = []byte{
var fileDescriptor_piecestore_c22028cf5808832f = []byte{
// 946 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6e, 0xdb, 0xc6,
0x13, 0x36, 0x29, 0x5b, 0xb2, 0x46, 0x7f, 0xac, 0xac, 0x8d, 0xdf, 0x4f, 0x26, 0xe2, 0x5a, 0x60,

View File

@ -9,8 +9,10 @@ import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -40,7 +42,7 @@ func (x RedundancyScheme_SchemeType) String() string {
return proto.EnumName(RedundancyScheme_SchemeType_name, int32(x))
}
func (RedundancyScheme_SchemeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{0, 0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{0, 0}
}
type Pointer_DataType int32
@ -63,7 +65,7 @@ func (x Pointer_DataType) String() string {
return proto.EnumName(Pointer_DataType_name, int32(x))
}
func (Pointer_DataType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{3, 0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{3, 0}
}
type RedundancyScheme struct {
@ -83,7 +85,7 @@ func (m *RedundancyScheme) Reset() { *m = RedundancyScheme{} }
func (m *RedundancyScheme) String() string { return proto.CompactTextString(m) }
func (*RedundancyScheme) ProtoMessage() {}
func (*RedundancyScheme) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{0}
}
func (m *RedundancyScheme) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RedundancyScheme.Unmarshal(m, b)
@ -157,7 +159,7 @@ func (m *RemotePiece) Reset() { *m = RemotePiece{} }
func (m *RemotePiece) String() string { return proto.CompactTextString(m) }
func (*RemotePiece) ProtoMessage() {}
func (*RemotePiece) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{1}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{1}
}
func (m *RemotePiece) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RemotePiece.Unmarshal(m, b)
@ -199,7 +201,7 @@ func (m *RemoteSegment) Reset() { *m = RemoteSegment{} }
func (m *RemoteSegment) String() string { return proto.CompactTextString(m) }
func (*RemoteSegment) ProtoMessage() {}
func (*RemoteSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{2}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{2}
}
func (m *RemoteSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RemoteSegment.Unmarshal(m, b)
@ -265,7 +267,7 @@ func (m *Pointer) Reset() { *m = Pointer{} }
func (m *Pointer) String() string { return proto.CompactTextString(m) }
func (*Pointer) ProtoMessage() {}
func (*Pointer) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{3}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{3}
}
func (m *Pointer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Pointer.Unmarshal(m, b)
@ -347,7 +349,7 @@ func (m *PutRequest) Reset() { *m = PutRequest{} }
func (m *PutRequest) String() string { return proto.CompactTextString(m) }
func (*PutRequest) ProtoMessage() {}
func (*PutRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{4}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{4}
}
func (m *PutRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PutRequest.Unmarshal(m, b)
@ -393,7 +395,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{5}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{5}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetRequest.Unmarshal(m, b)
@ -437,7 +439,7 @@ func (m *ListRequest) Reset() { *m = ListRequest{} }
func (m *ListRequest) String() string { return proto.CompactTextString(m) }
func (*ListRequest) ProtoMessage() {}
func (*ListRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{6}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{6}
}
func (m *ListRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListRequest.Unmarshal(m, b)
@ -510,7 +512,7 @@ func (m *PutResponse) Reset() { *m = PutResponse{} }
func (m *PutResponse) String() string { return proto.CompactTextString(m) }
func (*PutResponse) ProtoMessage() {}
func (*PutResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{7}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{7}
}
func (m *PutResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PutResponse.Unmarshal(m, b)
@ -545,7 +547,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{8}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{8}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetResponse.Unmarshal(m, b)
@ -606,7 +608,7 @@ func (m *ListResponse) Reset() { *m = ListResponse{} }
func (m *ListResponse) String() string { return proto.CompactTextString(m) }
func (*ListResponse) ProtoMessage() {}
func (*ListResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{9}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{9}
}
func (m *ListResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListResponse.Unmarshal(m, b)
@ -653,7 +655,7 @@ func (m *ListResponse_Item) Reset() { *m = ListResponse_Item{} }
func (m *ListResponse_Item) String() string { return proto.CompactTextString(m) }
func (*ListResponse_Item) ProtoMessage() {}
func (*ListResponse_Item) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{9, 0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{9, 0}
}
func (m *ListResponse_Item) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListResponse_Item.Unmarshal(m, b)
@ -705,7 +707,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{10}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{10}
}
func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
@ -743,7 +745,7 @@ func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteResponse) ProtoMessage() {}
func (*DeleteResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{11}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{11}
}
func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
@ -778,7 +780,7 @@ func (m *IterateRequest) Reset() { *m = IterateRequest{} }
func (m *IterateRequest) String() string { return proto.CompactTextString(m) }
func (*IterateRequest) ProtoMessage() {}
func (*IterateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{12}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{12}
}
func (m *IterateRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IterateRequest.Unmarshal(m, b)
@ -837,7 +839,7 @@ func (m *PayerBandwidthAllocationRequest) Reset() { *m = PayerBandwidthA
func (m *PayerBandwidthAllocationRequest) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocationRequest) ProtoMessage() {}
func (*PayerBandwidthAllocationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{13}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{13}
}
func (m *PayerBandwidthAllocationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocationRequest.Unmarshal(m, b)
@ -875,7 +877,7 @@ func (m *PayerBandwidthAllocationResponse) Reset() { *m = PayerBandwidth
func (m *PayerBandwidthAllocationResponse) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocationResponse) ProtoMessage() {}
func (*PayerBandwidthAllocationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_b7facd921d32685d, []int{14}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{14}
}
func (m *PayerBandwidthAllocationResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocationResponse.Unmarshal(m, b)
@ -931,8 +933,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for PointerDB service
// PointerDBClient is the client API for PointerDB service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type PointerDBClient interface {
// Put formats and hands off a file path to be saved to boltdb
Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
@ -999,8 +1002,7 @@ func (c *pointerDBClient) PayerBandwidthAllocation(ctx context.Context, in *Paye
return out, nil
}
// Server API for PointerDB service
// PointerDBServer is the server API for PointerDB service.
type PointerDBServer interface {
// Put formats and hands off a file path to be saved to boltdb
Put(context.Context, *PutRequest) (*PutResponse, error)
@ -1137,9 +1139,9 @@ var _PointerDB_serviceDesc = grpc.ServiceDesc{
Metadata: "pointerdb.proto",
}
func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_pointerdb_b7facd921d32685d) }
func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_pointerdb_21b4d7ef3abc5ac1) }
var fileDescriptor_pointerdb_b7facd921d32685d = []byte{
var fileDescriptor_pointerdb_21b4d7ef3abc5ac1 = []byte{
// 1092 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6f, 0x1b, 0x45,
0x17, 0xae, 0xbf, 0xe3, 0xb3, 0x76, 0xea, 0x77, 0xd4, 0x37, 0xdd, 0xba, 0x45, 0x09, 0x8b, 0x80,

View File

@ -30,7 +30,7 @@ func (m *SegmentMeta) Reset() { *m = SegmentMeta{} }
func (m *SegmentMeta) String() string { return proto.CompactTextString(m) }
func (*SegmentMeta) ProtoMessage() {}
func (*SegmentMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_streams_2b972859339b7bc3, []int{0}
return fileDescriptor_streams_c0d9754174b032dc, []int{0}
}
func (m *SegmentMeta) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SegmentMeta.Unmarshal(m, b)
@ -78,7 +78,7 @@ func (m *StreamInfo) Reset() { *m = StreamInfo{} }
func (m *StreamInfo) String() string { return proto.CompactTextString(m) }
func (*StreamInfo) ProtoMessage() {}
func (*StreamInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_streams_2b972859339b7bc3, []int{1}
return fileDescriptor_streams_c0d9754174b032dc, []int{1}
}
func (m *StreamInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamInfo.Unmarshal(m, b)
@ -140,7 +140,7 @@ func (m *StreamMeta) Reset() { *m = StreamMeta{} }
func (m *StreamMeta) String() string { return proto.CompactTextString(m) }
func (*StreamMeta) ProtoMessage() {}
func (*StreamMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_streams_2b972859339b7bc3, []int{2}
return fileDescriptor_streams_c0d9754174b032dc, []int{2}
}
func (m *StreamMeta) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamMeta.Unmarshal(m, b)
@ -194,9 +194,9 @@ func init() {
proto.RegisterType((*StreamMeta)(nil), "streams.StreamMeta")
}
func init() { proto.RegisterFile("streams.proto", fileDescriptor_streams_2b972859339b7bc3) }
func init() { proto.RegisterFile("streams.proto", fileDescriptor_streams_c0d9754174b032dc) }
var fileDescriptor_streams_2b972859339b7bc3 = []byte{
var fileDescriptor_streams_c0d9754174b032dc = []byte{
// 304 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x51, 0xcb, 0x4e, 0xc3, 0x30,
0x10, 0x54, 0x5f, 0x50, 0xb6, 0x29, 0x05, 0x03, 0x52, 0x04, 0x17, 0x14, 0x0e, 0x20, 0x84, 0x7a,

View File

@ -62,13 +62,7 @@ func CopyNode(src *Node) (dst *Node) {
}
}
node.AuditSuccess = src.AuditSuccess
node.IsUp = src.IsUp
node.LatencyList = src.LatencyList
node.Type = src.Type
node.UpdateAuditSuccess = src.UpdateAuditSuccess
node.UpdateLatency = src.UpdateLatency
node.UpdateUptime = src.UpdateUptime
return &node
}

View File

@ -1,45 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package statdb
import (
"context"
"go.uber.org/zap"
"storj.io/storj/pkg/provider"
)
//CtxKey Used as statdb key
type CtxKey int
const (
ctxKeyStats CtxKey = iota
)
// Config is a configuration struct that is everything you need to start a
// StatDB responsibility
type Config struct {
DatabaseURL string `help:"the database connection string to use" default:"$CONFDIR/stats.db"`
DatabaseDriver string `help:"the database driver to use" default:"sqlite3"`
}
// Run implements the provider.Responsibility interface
func (c Config) Run(ctx context.Context, server *provider.Provider) error {
ns, err := NewStatDB(c.DatabaseDriver, c.DatabaseURL, zap.L())
if err != nil {
return err
}
return server.Run(context.WithValue(ctx, ctxKeyStats, ns))
}
// LoadFromContext loads an existing StatDB from the Provider context
// stack if one exists.
func LoadFromContext(ctx context.Context) *StatDB {
if v, ok := ctx.Value(ctxKeyStats).(*StatDB); ok {
return v
}
return nil
}

View File

@ -1,17 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package statdb
// go:generate dbx.v1 schema -d postgres -d sqlite3 statdb.dbx .
// go:generate dbx.v1 golang -d postgres -d sqlite3 statdb.dbx .
import (
"github.com/zeebo/errs"
)
func init() {
// catch dbx errors
c := errs.Class("statdb")
WrapErr = func(e *Error) error { return c.Wrap(e) }
}

View File

@ -1,25 +0,0 @@
// dbx.v1 golang statdb.dbx .
model node (
key id
field id blob
field audit_success_count int64 (updatable)
field total_audit_count int64 (updatable)
field audit_success_ratio float64 (updatable)
field uptime_success_count int64 (updatable)
field total_uptime_count int64 (updatable)
field uptime_ratio float64 (updatable)
field created_at timestamp ( autoinsert )
field updated_at timestamp ( autoinsert, autoupdate )
)
create node ( )
update node ( where node.id = ? )
delete node ( where node.id = ? )
read one (
select node
where node.id = ?
)

File diff suppressed because it is too large Load Diff

View File

@ -1,14 +0,0 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE nodes (
id bytea NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
audit_success_ratio double precision NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_ratio double precision NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);

View File

@ -1,14 +0,0 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE nodes (
id BLOB NOT NULL,
audit_success_count INTEGER NOT NULL,
total_audit_count INTEGER NOT NULL,
audit_success_ratio REAL NOT NULL,
uptime_success_count INTEGER NOT NULL,
total_uptime_count INTEGER NOT NULL,
uptime_ratio REAL NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);

View File

@ -1,21 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package statdb
import (
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// NodeID is an alias to storj.NodeID for use in generated protobuf code
type NodeID = storj.NodeID
// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code
type NodeIDList = storj.NodeIDList
// Node is an alias to storj.Node for use in generated protobuf code
type Node = pb.Node
// NodeStats is an alias to storj.NodeStats for use in generated protobuf code
type NodeStats = pb.NodeStats

File diff suppressed because it is too large Load Diff

View File

@ -1,112 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
package statdb;
import "gogo.proto";
import "node.proto";
// StatDB defines the interface for retrieving and updating storagenode stats
service StatDB {
// Create a db entry for the provided storagenode ID
rpc Create(CreateRequest) returns (CreateResponse);
// Get uses a storagenode ID to get that storagenode's stats
rpc Get(GetRequest) returns (GetResponse);
// FindInvalidNodes gets a subset of storagenodes that fail to meet minimum reputation args
rpc FindInvalidNodes(FindInvalidNodesRequest) returns (FindInvalidNodesResponse);
// Update updates all stats for a single storagenode
rpc Update(UpdateRequest) returns (UpdateResponse);
// UpdateUptime updates uptime stats for a single storagenode
rpc UpdateUptime(UpdateUptimeRequest) returns (UpdateUptimeResponse);
// UpdateAuditSuccess updates audit success stats for a single storagenode
rpc UpdateAuditSuccess(UpdateAuditSuccessRequest) returns (UpdateAuditSuccessResponse);
// UpdateBatch updates storagenode stats for multiple farmers at a time
rpc UpdateBatch(UpdateBatchRequest) returns (UpdateBatchResponse);
// CreateEntryIfNotExists creates a db entry if it didn't exist
rpc CreateEntryIfNotExists(CreateEntryIfNotExistsRequest) returns (CreateEntryIfNotExistsResponse);
}
// CreateRequest is a request message for the Create rpc call
message CreateRequest {
node.Node node = 1;
node.NodeStats stats = 2;
}
// CreateResponse is a response message for the Create rpc call
message CreateResponse {
node.NodeStats stats = 1;
}
// GetRequest is a request message for the Get rpc call
message GetRequest {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
// GetResponse is a response message for the Get rpc call
message GetResponse {
node.NodeStats stats = 1;
}
// FindInvalidNodesRequest is a request message for the FindInvalidNodes rpc call
message FindInvalidNodesRequest {
repeated bytes node_ids = 1 [(gogoproto.customtype) = "NodeID"];
node.NodeStats max_stats = 2;
}
// FindInvalidNodesResponse is a response message for the FindInvalidNodes rpc call
message FindInvalidNodesResponse {
repeated bytes invalid_ids = 1 [(gogoproto.customtype) = "NodeID"];
}
// UpdateRequest is a request message for the Update rpc call
message UpdateRequest {
node.Node node = 1;
}
// UpdateRequest is a response message for the Update rpc call
message UpdateResponse {
node.NodeStats stats = 1;
}
// UpdateUptimeRequest is a request message for the UpdateUptime rpc call
message UpdateUptimeRequest {
node.Node node = 1;
}
// UpdateUptimeResponse is a response message for the UpdateUptime rpc call
message UpdateUptimeResponse {
node.NodeStats stats = 1;
}
// UpdateAuditSuccessRequest is a request message for the UpdateAuditSuccess rpc call
message UpdateAuditSuccessRequest {
node.Node node = 1;
}
// UpdateAuditSuccessResponse is a response message for the UpdateAuditSuccess rpc call
message UpdateAuditSuccessResponse {
node.NodeStats stats = 1;
}
// UpdateBatchRequest is a request message for the UpdateBatch rpc call
message UpdateBatchRequest {
repeated node.Node node_list = 1;
}
// UpdateBatchResponse is a response message for the UpdateBatch rpc call
message UpdateBatchResponse {
repeated node.NodeStats stats_list = 1;
repeated node.Node failed_nodes = 2;
}
// CreateEntryIfNotExistsRequest is a request message for the CreateEntryIfNotExists rpc call
message CreateEntryIfNotExistsRequest {
node.Node node = 1;
}
// CreateEntryIfNotExistsResponse is a response message for the CreateEntryIfNotExists rpc call
message CreateEntryIfNotExistsResponse {
node.NodeStats stats = 1;
}

View File

@ -5,428 +5,131 @@ package statdb
import (
"context"
"database/sql"
"strings"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/migrate"
dbx "storj.io/storj/pkg/statdb/dbx"
pb "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
var (
mon = monkit.Package()
errAuditSuccess = errs.Class("statdb audit success error")
errUptime = errs.Class("statdb uptime error")
)
// DB interface for database operations
type DB interface {
// Create a db entry for the provided storagenode
Create(ctx context.Context, createReq *CreateRequest) (resp *CreateResponse, err error)
// StatDB implements the statdb RPC service
type StatDB struct {
log *zap.Logger
DB *dbx.DB
// Get a storagenode's stats from the db
Get(ctx context.Context, getReq *GetRequest) (resp *GetResponse, err error)
// FindInvalidNodes finds a subset of storagenodes that fail to meet minimum reputation requirements
FindInvalidNodes(ctx context.Context, getReq *FindInvalidNodesRequest) (resp *FindInvalidNodesResponse, err error)
// Update a single storagenode's stats in the db
Update(ctx context.Context, updateReq *UpdateRequest) (resp *UpdateResponse, err error)
// UpdateUptime updates a single storagenode's uptime stats in the db
UpdateUptime(ctx context.Context, updateReq *UpdateUptimeRequest) (resp *UpdateUptimeResponse, err error)
// UpdateAuditSuccess updates a single storagenode's uptime stats in the db
UpdateAuditSuccess(ctx context.Context, updateReq *UpdateAuditSuccessRequest) (resp *UpdateAuditSuccessResponse, err error)
// UpdateBatch for updating multiple farmers' stats in the db
UpdateBatch(ctx context.Context, updateBatchReq *UpdateBatchRequest) (resp *UpdateBatchResponse, err error)
// CreateEntryIfNotExists creates a statdb node entry and saves to statdb if it didn't already exist
CreateEntryIfNotExists(ctx context.Context, createIfReq *CreateEntryIfNotExistsRequest) (resp *CreateEntryIfNotExistsResponse, err error)
}
// NewStatDB creates instance of StatDB
func NewStatDB(driver, source string, log *zap.Logger) (*StatDB, error) {
db, err := dbx.Open(driver, source)
if err != nil {
return nil, Error.New("failed opening database %q, %q: %v",
driver, source, err)
}
err = migrate.Create("statdb", db)
if err != nil {
return nil, Error.Wrap(err)
}
return &StatDB{
DB: db,
log: log,
}, nil
// CreateRequest is a statdb create request message
type CreateRequest struct {
Node storj.NodeID
Stats *pb.NodeStats
}
// Create a db entry for the provided storagenode
func (s *StatDB) Create(ctx context.Context, createReq *pb.CreateRequest) (resp *pb.CreateResponse, err error) {
defer mon.Task()(&ctx)(&err)
var (
totalAuditCount int64
auditSuccessCount int64
auditSuccessRatio float64
totalUptimeCount int64
uptimeSuccessCount int64
uptimeRatio float64
)
stats := createReq.Stats
if stats != nil {
totalAuditCount = stats.AuditCount
auditSuccessCount = stats.AuditSuccessCount
auditSuccessRatio, err = checkRatioVars(auditSuccessCount, totalAuditCount)
if err != nil {
return nil, errAuditSuccess.Wrap(err)
}
totalUptimeCount = stats.UptimeCount
uptimeSuccessCount = stats.UptimeSuccessCount
uptimeRatio, err = checkRatioVars(uptimeSuccessCount, totalUptimeCount)
if err != nil {
return nil, errUptime.Wrap(err)
}
}
node := createReq.Node
dbNode, err := s.DB.Create_Node(
ctx,
dbx.Node_Id(node.Id.Bytes()),
dbx.Node_AuditSuccessCount(auditSuccessCount),
dbx.Node_TotalAuditCount(totalAuditCount),
dbx.Node_AuditSuccessRatio(auditSuccessRatio),
dbx.Node_UptimeSuccessCount(uptimeSuccessCount),
dbx.Node_TotalUptimeCount(totalUptimeCount),
dbx.Node_UptimeRatio(uptimeRatio),
)
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
nodeStats := &pb.NodeStats{
NodeId: node.Id,
AuditSuccessRatio: dbNode.AuditSuccessRatio,
AuditCount: dbNode.TotalAuditCount,
UptimeRatio: dbNode.UptimeRatio,
UptimeCount: dbNode.TotalUptimeCount,
}
return &pb.CreateResponse{
Stats: nodeStats,
}, nil
// CreateResponse is a statdb create response message
type CreateResponse struct {
Stats *pb.NodeStats
}
// Get a storagenode's stats from the db
func (s *StatDB) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetResponse, err error) {
defer mon.Task()(&ctx)(&err)
dbNode, err := s.DB.Get_Node_By_Id(ctx, dbx.Node_Id(getReq.NodeId.Bytes()))
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
nodeStats := &pb.NodeStats{
NodeId: getReq.NodeId,
AuditSuccessRatio: dbNode.AuditSuccessRatio,
AuditCount: dbNode.TotalAuditCount,
UptimeRatio: dbNode.UptimeRatio,
UptimeCount: dbNode.TotalUptimeCount,
}
return &pb.GetResponse{
Stats: nodeStats,
}, nil
// GetRequest is a statdb get request message
type GetRequest struct {
Node storj.NodeID
}
// FindInvalidNodes finds a subset of storagenodes that fail to meet minimum reputation requirements
func (s *StatDB) FindInvalidNodes(ctx context.Context, getReq *pb.FindInvalidNodesRequest) (resp *pb.FindInvalidNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
var invalidIds storj.NodeIDList
nodeIds := getReq.NodeIds
maxAuditSuccess := getReq.MaxStats.AuditSuccessRatio
maxUptime := getReq.MaxStats.UptimeRatio
rows, err := s.findInvalidNodesQuery(nodeIds, maxAuditSuccess, maxUptime)
if err != nil {
return nil, Error.Wrap(err)
}
defer func() {
err = rows.Close()
if err != nil {
s.log.Error(err.Error())
}
}()
for rows.Next() {
node := &dbx.Node{}
err = rows.Scan(&node.Id, &node.TotalAuditCount, &node.TotalUptimeCount, &node.AuditSuccessRatio, &node.UptimeRatio)
if err != nil {
return nil, Error.Wrap(err)
}
id, err := storj.NodeIDFromBytes(node.Id)
if err != nil {
return nil, Error.Wrap(err)
}
invalidIds = append(invalidIds, id)
}
return &pb.FindInvalidNodesResponse{
InvalidIds: invalidIds,
}, nil
// GetResponse is a statdb get response message
type GetResponse struct {
Stats *pb.NodeStats
}
func (s *StatDB) findInvalidNodesQuery(nodeIds storj.NodeIDList, auditSuccess, uptime float64) (*sql.Rows, error) {
args := make([]interface{}, len(nodeIds))
for i, id := range nodeIds {
args[i] = id.Bytes()
}
args = append(args, auditSuccess, uptime)
rows, err := s.DB.Query(`SELECT nodes.id, nodes.total_audit_count,
nodes.total_uptime_count, nodes.audit_success_ratio,
nodes.uptime_ratio
FROM nodes
WHERE nodes.id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`)
AND nodes.total_audit_count > 0
AND nodes.total_uptime_count > 0
AND (
nodes.audit_success_ratio < ?
OR nodes.uptime_ratio < ?
)`, args...)
return rows, Error.Wrap(err)
// FindInvalidNodesRequest is a statdb find invalid node request message
type FindInvalidNodesRequest struct {
NodeIds []storj.NodeID
MaxStats *pb.NodeStats
}
// Update a single storagenode's stats in the db
func (s *StatDB) Update(ctx context.Context, updateReq *pb.UpdateRequest) (resp *pb.UpdateResponse, err error) {
defer mon.Task()(&ctx)(&err)
node := updateReq.GetNode()
createIfReq := &pb.CreateEntryIfNotExistsRequest{
Node: updateReq.GetNode(),
}
_, err = s.CreateEntryIfNotExists(ctx, createIfReq)
if err != nil {
return nil, Error.Wrap(err)
}
dbNode, err := s.DB.Get_Node_By_Id(ctx, dbx.Node_Id(node.Id.Bytes()))
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
auditSuccessCount := dbNode.AuditSuccessCount
totalAuditCount := dbNode.TotalAuditCount
var auditSuccessRatio float64
uptimeSuccessCount := dbNode.UptimeSuccessCount
totalUptimeCount := dbNode.TotalUptimeCount
var uptimeRatio float64
updateFields := dbx.Node_Update_Fields{}
if node.UpdateAuditSuccess {
auditSuccessCount, totalAuditCount, auditSuccessRatio = updateRatioVars(
node.AuditSuccess,
auditSuccessCount,
totalAuditCount,
)
updateFields.AuditSuccessCount = dbx.Node_AuditSuccessCount(auditSuccessCount)
updateFields.TotalAuditCount = dbx.Node_TotalAuditCount(totalAuditCount)
updateFields.AuditSuccessRatio = dbx.Node_AuditSuccessRatio(auditSuccessRatio)
}
if node.UpdateUptime {
uptimeSuccessCount, totalUptimeCount, uptimeRatio = updateRatioVars(
node.IsUp,
uptimeSuccessCount,
totalUptimeCount,
)
updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(uptimeSuccessCount)
updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)
updateFields.UptimeRatio = dbx.Node_UptimeRatio(uptimeRatio)
}
dbNode, err = s.DB.Update_Node_By_Id(ctx, dbx.Node_Id(node.Id.Bytes()), updateFields)
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
nodeStats := &pb.NodeStats{
NodeId: node.Id,
AuditSuccessRatio: dbNode.AuditSuccessRatio,
AuditCount: dbNode.TotalAuditCount,
UptimeRatio: dbNode.UptimeRatio,
UptimeCount: dbNode.TotalUptimeCount,
}
return &pb.UpdateResponse{
Stats: nodeStats,
}, nil
// FindInvalidNodesResponse is a statdb find invalid node response message
type FindInvalidNodesResponse struct {
InvalidIds []storj.NodeID
}
// UpdateUptime updates a single storagenode's uptime stats in the db
func (s *StatDB) UpdateUptime(ctx context.Context, updateReq *pb.UpdateUptimeRequest) (resp *pb.UpdateUptimeResponse, err error) {
defer mon.Task()(&ctx)(&err)
node := updateReq.GetNode()
dbNode, err := s.DB.Get_Node_By_Id(ctx, dbx.Node_Id(node.Id.Bytes()))
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
uptimeSuccessCount := dbNode.UptimeSuccessCount
totalUptimeCount := dbNode.TotalUptimeCount
var uptimeRatio float64
updateFields := dbx.Node_Update_Fields{}
uptimeSuccessCount, totalUptimeCount, uptimeRatio = updateRatioVars(
node.IsUp,
uptimeSuccessCount,
totalUptimeCount,
)
updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(uptimeSuccessCount)
updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)
updateFields.UptimeRatio = dbx.Node_UptimeRatio(uptimeRatio)
dbNode, err = s.DB.Update_Node_By_Id(ctx, dbx.Node_Id(node.Id.Bytes()), updateFields)
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
nodeStats := &pb.NodeStats{
NodeId: node.Id,
AuditSuccessRatio: dbNode.AuditSuccessRatio,
AuditCount: dbNode.TotalAuditCount,
UptimeRatio: dbNode.UptimeRatio,
UptimeCount: dbNode.TotalUptimeCount,
}
return &pb.UpdateUptimeResponse{
Stats: nodeStats,
}, nil
// UpdateRequest is a statdb update request message
type UpdateRequest struct {
Node storj.NodeID
UpdateAuditSuccess bool
AuditSuccess bool
UpdateUptime bool
IsUp bool
}
// UpdateAuditSuccess updates a single storagenode's uptime stats in the db
func (s *StatDB) UpdateAuditSuccess(ctx context.Context, updateReq *pb.UpdateAuditSuccessRequest) (resp *pb.UpdateAuditSuccessResponse, err error) {
defer mon.Task()(&ctx)(&err)
node := updateReq.GetNode()
dbNode, err := s.DB.Get_Node_By_Id(ctx, dbx.Node_Id(node.Id.Bytes()))
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
auditSuccessCount := dbNode.AuditSuccessCount
totalAuditCount := dbNode.TotalAuditCount
var auditRatio float64
updateFields := dbx.Node_Update_Fields{}
auditSuccessCount, totalAuditCount, auditRatio = updateRatioVars(
node.AuditSuccess,
auditSuccessCount,
totalAuditCount,
)
updateFields.AuditSuccessCount = dbx.Node_AuditSuccessCount(auditSuccessCount)
updateFields.TotalAuditCount = dbx.Node_TotalAuditCount(totalAuditCount)
updateFields.AuditSuccessRatio = dbx.Node_AuditSuccessRatio(auditRatio)
dbNode, err = s.DB.Update_Node_By_Id(ctx, dbx.Node_Id(node.Id.Bytes()), updateFields)
if err != nil {
return nil, Error.Wrap(status.Errorf(codes.Internal, err.Error()))
}
nodeStats := &pb.NodeStats{
NodeId: node.Id,
AuditSuccessRatio: dbNode.AuditSuccessRatio,
AuditCount: dbNode.TotalAuditCount,
UptimeRatio: dbNode.UptimeRatio,
UptimeCount: dbNode.TotalUptimeCount,
}
return &pb.UpdateAuditSuccessResponse{
Stats: nodeStats,
}, nil
// UpdateResponse is a statdb update response message
type UpdateResponse struct {
Stats *pb.NodeStats
}
// UpdateBatch for updating multiple farmers' stats in the db
func (s *StatDB) UpdateBatch(ctx context.Context, updateBatchReq *pb.UpdateBatchRequest) (resp *pb.UpdateBatchResponse, err error) {
defer mon.Task()(&ctx)(&err)
var nodeStatsList []*pb.NodeStats
var failedNodes []*pb.Node
for _, node := range updateBatchReq.NodeList {
updateReq := &pb.UpdateRequest{
Node: node,
}
updateRes, err := s.Update(ctx, updateReq)
if err != nil {
s.log.Error(err.Error())
failedNodes = append(failedNodes, node)
} else {
nodeStatsList = append(nodeStatsList, updateRes.Stats)
}
}
updateBatchRes := &pb.UpdateBatchResponse{
FailedNodes: failedNodes,
StatsList: nodeStatsList,
}
return updateBatchRes, nil
// UpdateUptimeRequest is a statdb uptime request message
type UpdateUptimeRequest struct {
Node storj.NodeID
IsUp bool
}
// CreateEntryIfNotExists creates a statdb node entry and saves to statdb if it didn't already exist
func (s *StatDB) CreateEntryIfNotExists(ctx context.Context, createIfReq *pb.CreateEntryIfNotExistsRequest) (resp *pb.CreateEntryIfNotExistsResponse, err error) {
defer mon.Task()(&ctx)(&err)
getReq := &pb.GetRequest{
NodeId: createIfReq.Node.Id,
}
getRes, err := s.Get(ctx, getReq)
// TODO: figure out better way to confirm error is type dbx.ErrorCode_NoRows
if err != nil && strings.Contains(err.Error(), "no rows in result set") {
createReq := &pb.CreateRequest{
Node: createIfReq.Node,
}
res, err := s.Create(ctx, createReq)
if err != nil {
return nil, Error.Wrap(err)
}
createEntryIfNotExistsRes := &pb.CreateEntryIfNotExistsResponse{
Stats: res.Stats,
}
return createEntryIfNotExistsRes, nil
}
if err != nil {
return nil, Error.Wrap(err)
}
createEntryIfNotExistsRes := &pb.CreateEntryIfNotExistsResponse{
Stats: getRes.Stats,
}
return createEntryIfNotExistsRes, nil
// UpdateUptimeResponse is a statdb uptime response message.
type UpdateUptimeResponse struct {
	// Stats holds the node's reputation stats after the uptime update.
	Stats *pb.NodeStats
}
// updateRatioVars increments totalCount, increments successCount when
// newStatus is true, and returns the new counts plus the recomputed success
// ratio. Because totalCount is always incremented first, the divisor is at
// least 1 for any non-negative input, so the ratio is always well-defined.
func updateRatioVars(newStatus bool, successCount, totalCount int64) (int64, int64, float64) {
	totalCount++
	if newStatus {
		successCount++
	}
	newRatio := float64(successCount) / float64(totalCount)
	return successCount, totalCount, newRatio
}
// UpdateAuditSuccessRequest is a statdb audit request message.
type UpdateAuditSuccessRequest struct {
	Node         storj.NodeID // ID of the audited node
	AuditSuccess bool         // whether the node passed the audit
}
func checkRatioVars(successCount, totalCount int64) (ratio float64, err error) {
if successCount < 0 {
return 0, Error.Wrap(errs.New("success count less than 0"))
}
if totalCount < 0 {
return 0, Error.Wrap(errs.New("total count less than 0"))
}
if successCount > totalCount {
return 0, Error.Wrap(errs.New("success count greater than total count"))
}
ratio = float64(successCount) / float64(totalCount)
return ratio, nil
// UpdateAuditSuccessResponse is a statdb audit response message.
type UpdateAuditSuccessResponse struct {
	// Stats holds the node's reputation stats after the audit update.
	Stats *pb.NodeStats
}

// UpdateBatchRequest is a statdb update batch request message.
type UpdateBatchRequest struct {
	// NodeList is the set of per-node updates to apply.
	NodeList []*UpdateRequest
}

// UpdateBatchResponse is a statdb update batch response message.
type UpdateBatchResponse struct {
	// StatsList holds the post-update stats for each node that succeeded.
	StatsList []*pb.NodeStats
	// FailedNodes collects the update requests that could not be applied.
	FailedNodes []*UpdateRequest
}
// GetFailedNodes returns the list of update requests that could not be
// applied, or nil when the receiver itself is nil.
func (m *UpdateBatchResponse) GetFailedNodes() []*UpdateRequest {
	if m == nil {
		return nil
	}
	return m.FailedNodes
}
// CreateEntryIfNotExistsRequest is a statdb create entry request message.
type CreateEntryIfNotExistsRequest struct {
	Node storj.NodeID // ID of the node to look up or create
}

// CreateEntryIfNotExistsResponse is a statdb create response message.
type CreateEntryIfNotExistsResponse struct {
	// Stats holds the node's stats, freshly created or already existing.
	Stats *pb.NodeStats
}

View File

@ -1,465 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package statdb_test
import (
"context"
"fmt"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/statdb"
dbx "storj.io/storj/pkg/statdb/dbx"
pb "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/storj"
)
var (
	// ctx is the shared background context used by all tests in this file.
	ctx = context.Background()
)
// TestCreateDoesNotExist verifies Create inserts a fresh row with zeroed
// ratios and that the row is readable back through the dbx layer.
func TestCreateDoesNotExist(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")
	node := &pb.Node{Id: nodeID}
	createReq := &pb.CreateRequest{
		Node: node,
	}
	resp, err := statdb.Create(ctx, createReq)
	assert.NoError(t, err)
	stats := resp.Stats
	assert.EqualValues(t, 0, stats.AuditSuccessRatio)
	assert.EqualValues(t, 0, stats.UptimeRatio)

	// Confirm the persisted row matches via a direct dbx read.
	nodeInfo, err := db.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
	assert.NoError(t, err)
	assert.EqualValues(t, nodeID.Bytes(), nodeInfo.Id)
	assert.EqualValues(t, 0, nodeInfo.AuditSuccessRatio)
	assert.EqualValues(t, 0, nodeInfo.UptimeRatio)
}
// TestCreateExists verifies Create errors when the node already has a row.
func TestCreateExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")

	auditSuccessCount, totalAuditCount, auditRatio := getRatio(4, 10)
	uptimeSuccessCount, totalUptimeCount, uptimeRatio := getRatio(8, 25)
	err = createNode(ctx, db, nodeID, auditSuccessCount, totalAuditCount, auditRatio,
		uptimeSuccessCount, totalUptimeCount, uptimeRatio)
	assert.NoError(t, err)

	node := &pb.Node{Id: nodeID}
	createReq := &pb.CreateRequest{
		Node: node,
	}
	_, err = statdb.Create(ctx, createReq)
	assert.Error(t, err)
}
// TestCreateWithStats verifies Create honors caller-supplied initial stats,
// both in its response and in the persisted row.
func TestCreateWithStats(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	auditSuccessCount, totalAuditCount, auditRatio := getRatio(4, 10)
	uptimeSuccessCount, totalUptimeCount, uptimeRatio := getRatio(8, 25)
	nodeID := teststorj.NodeIDFromString("testnodeid")
	node := &pb.Node{Id: nodeID}
	stats := &pb.NodeStats{
		AuditCount:         totalAuditCount,
		AuditSuccessCount:  auditSuccessCount,
		UptimeCount:        totalUptimeCount,
		UptimeSuccessCount: uptimeSuccessCount,
	}
	createReq := &pb.CreateRequest{
		Node:  node,
		Stats: stats,
	}
	resp, err := statdb.Create(ctx, createReq)
	assert.NoError(t, err)
	s := resp.Stats
	assert.EqualValues(t, auditRatio, s.AuditSuccessRatio)
	assert.EqualValues(t, uptimeRatio, s.UptimeRatio)

	// Confirm the persisted ratios via a direct dbx read.
	nodeInfo, err := db.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()))
	assert.NoError(t, err)
	assert.EqualValues(t, nodeID.Bytes(), nodeInfo.Id)
	assert.EqualValues(t, auditRatio, nodeInfo.AuditSuccessRatio)
	assert.EqualValues(t, uptimeRatio, nodeInfo.UptimeRatio)
}
// TestGetExists verifies Get returns the stored ratios for an existing node.
func TestGetExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")

	auditSuccessCount, totalAuditCount, auditRatio := getRatio(4, 10)
	uptimeSuccessCount, totalUptimeCount, uptimeRatio := getRatio(8, 25)
	err = createNode(ctx, db, nodeID, auditSuccessCount, totalAuditCount, auditRatio,
		uptimeSuccessCount, totalUptimeCount, uptimeRatio)
	assert.NoError(t, err)

	getReq := &pb.GetRequest{
		NodeId: nodeID,
	}
	resp, err := statdb.Get(ctx, getReq)
	assert.NoError(t, err)

	stats := resp.Stats
	assert.EqualValues(t, auditRatio, stats.AuditSuccessRatio)
	assert.EqualValues(t, uptimeRatio, stats.UptimeRatio)
}
// TestGetDoesNotExist verifies Get errors for a node with no row.
func TestGetDoesNotExist(t *testing.T) {
	dbPath := getDBPath()
	statdb, _, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")

	getReq := &pb.GetRequest{
		NodeId: nodeID,
	}
	_, err = statdb.Get(ctx, getReq)
	assert.Error(t, err)
}
// TestFindInvalidNodes verifies FindInvalidNodes returns only the queried
// nodes whose audit or uptime ratio falls below MaxStats, excluding nodes
// with no recorded attempts and nodes not listed in the request.
func TestFindInvalidNodes(t *testing.T) {
	NodeIDs := teststorj.NodeIDsFromStrings("id1", "id2", "id3", "id4", "id5", "id6", "id7")
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	for _, tt := range []struct {
		nodeID             storj.NodeID
		auditSuccessCount  int64
		totalAuditCount    int64
		auditRatio         float64
		uptimeSuccessCount int64
		totalUptimeCount   int64
		uptimeRatio        float64
	}{
		{NodeIDs[0], 20, 20, 1, 20, 20, 1},     // good audit success
		{NodeIDs[1], 5, 20, 0.25, 20, 20, 1},   // bad audit success, good uptime
		{NodeIDs[2], 20, 20, 1, 5, 20, 0.25},   // good audit success, bad uptime
		{NodeIDs[3], 0, 0, 0, 20, 20, 1},       // "bad" audit success, no audits
		{NodeIDs[4], 20, 20, 1, 0, 0, 0},       // "bad" uptime success, no checks
		{NodeIDs[5], 0, 1, 0, 5, 5, 1},         // bad audit success exactly one audit
		{NodeIDs[6], 0, 20, 0, 20, 20, 1},      // bad ratios, excluded from query
	} {
		err = createNode(ctx, db, tt.nodeID, tt.auditSuccessCount, tt.totalAuditCount, tt.auditRatio,
			tt.uptimeSuccessCount, tt.totalUptimeCount, tt.uptimeRatio)
		assert.NoError(t, err)
	}

	findInvalidNodesReq := &pb.FindInvalidNodesRequest{
		NodeIds: storj.NodeIDList{
			NodeIDs[0], NodeIDs[1],
			NodeIDs[2], NodeIDs[3],
			NodeIDs[4], NodeIDs[5],
		},
		MaxStats: &pb.NodeStats{
			AuditSuccessRatio: 0.5,
			UptimeRatio:       0.5,
		},
	}

	resp, err := statdb.FindInvalidNodes(ctx, findInvalidNodesReq)
	assert.NoError(t, err)

	invalid := resp.InvalidIds

	assert.Contains(t, invalid, NodeIDs[1])
	assert.Contains(t, invalid, NodeIDs[2])
	assert.Contains(t, invalid, NodeIDs[5])
	assert.Len(t, invalid, 3)
}
// TestUpdateExists verifies Update recomputes both ratios when a node
// records a successful audit and a failed uptime check.
func TestUpdateExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")

	auditSuccessCount, totalAuditCount, auditRatio := getRatio(4, 10)
	uptimeSuccessCount, totalUptimeCount, uptimeRatio := getRatio(8, 25)
	err = createNode(ctx, db, nodeID, auditSuccessCount, totalAuditCount, auditRatio,
		uptimeSuccessCount, totalUptimeCount, uptimeRatio)
	assert.NoError(t, err)

	node := &pb.Node{
		Id:                 nodeID,
		UpdateAuditSuccess: true,
		AuditSuccess:       true,
		UpdateUptime:       true,
		IsUp:               false,
	}
	updateReq := &pb.UpdateRequest{
		Node: node,
	}
	resp, err := statdb.Update(ctx, updateReq)
	assert.NoError(t, err)

	// Audit succeeded (+1 success, +1 total); uptime check failed (+1 total).
	_, _, newAuditRatio := getRatio(int(auditSuccessCount+1), int(totalAuditCount+1))
	_, _, newUptimeRatio := getRatio(int(uptimeSuccessCount), int(totalUptimeCount+1))
	stats := resp.Stats
	assert.EqualValues(t, newAuditRatio, stats.AuditSuccessRatio)
	assert.EqualValues(t, newUptimeRatio, stats.UptimeRatio)
}
// TestUpdateUptimeExists verifies UpdateUptime changes only the uptime
// counters, leaving audit stats untouched.
func TestUpdateUptimeExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")

	auditSuccessCount, totalAuditCount, auditRatio := getRatio(4, 10)
	uptimeSuccessCount, totalUptimeCount, uptimeRatio := getRatio(8, 25)
	err = createNode(ctx, db, nodeID, auditSuccessCount, totalAuditCount, auditRatio,
		uptimeSuccessCount, totalUptimeCount, uptimeRatio)
	assert.NoError(t, err)

	node := &pb.Node{
		Id:   nodeID,
		IsUp: false,
	}
	updateReq := &pb.UpdateUptimeRequest{
		Node: node,
	}
	resp, err := statdb.UpdateUptime(ctx, updateReq)
	assert.NoError(t, err)

	_, _, newUptimeRatio := getRatio(int(uptimeSuccessCount), int(totalUptimeCount+1))
	stats := resp.Stats
	assert.EqualValues(t, auditRatio, stats.AuditSuccessRatio)
	assert.EqualValues(t, totalAuditCount, stats.AuditCount)
	assert.EqualValues(t, newUptimeRatio, stats.UptimeRatio)
}
// TestUpdateAuditSuccessExists verifies UpdateAuditSuccess changes only the
// audit counters, leaving uptime stats untouched.
func TestUpdateAuditSuccessExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID := teststorj.NodeIDFromString("testnodeid")

	auditSuccessCount, totalAuditCount, auditRatio := getRatio(4, 10)
	uptimeSuccessCount, totalUptimeCount, uptimeRatio := getRatio(8, 25)
	err = createNode(ctx, db, nodeID, auditSuccessCount, totalAuditCount, auditRatio,
		uptimeSuccessCount, totalUptimeCount, uptimeRatio)
	assert.NoError(t, err)

	node := &pb.Node{
		Id:           nodeID,
		AuditSuccess: false,
	}
	updateReq := &pb.UpdateAuditSuccessRequest{
		Node: node,
	}
	resp, err := statdb.UpdateAuditSuccess(ctx, updateReq)
	assert.NoError(t, err)

	// Failed audit: total increments, success count stays.
	_, _, newAuditRatio := getRatio(int(auditSuccessCount), int(totalAuditCount+1))
	stats := resp.Stats
	assert.EqualValues(t, newAuditRatio, stats.AuditSuccessRatio)
	assert.EqualValues(t, totalAuditCount+1, stats.AuditCount)
	assert.EqualValues(t, uptimeRatio, stats.UptimeRatio)
}
// TestUpdateBatchExists verifies UpdateBatch applies each node's update and
// returns the stats in request order.
func TestUpdateBatchExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID1 := teststorj.NodeIDFromString("testnodeid1")
	nodeID2 := teststorj.NodeIDFromString("testnodeid2")

	auditSuccessCount1, totalAuditCount1, auditRatio1 := getRatio(4, 10)
	uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1 := getRatio(8, 25)
	err = createNode(ctx, db, nodeID1, auditSuccessCount1, totalAuditCount1, auditRatio1,
		uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1)
	assert.NoError(t, err)

	auditSuccessCount2, totalAuditCount2, auditRatio2 := getRatio(7, 10)
	uptimeSuccessCount2, totalUptimeCount2, uptimeRatio2 := getRatio(8, 20)
	err = createNode(ctx, db, nodeID2, auditSuccessCount2, totalAuditCount2, auditRatio2,
		uptimeSuccessCount2, totalUptimeCount2, uptimeRatio2)
	assert.NoError(t, err)

	node1 := &pb.Node{
		Id:                 nodeID1,
		UpdateAuditSuccess: true,
		AuditSuccess:       true,
		UpdateUptime:       true,
		IsUp:               false,
	}
	node2 := &pb.Node{
		Id:                 nodeID2,
		UpdateAuditSuccess: true,
		AuditSuccess:       true,
		UpdateUptime:       false,
	}
	updateBatchReq := &pb.UpdateBatchRequest{
		NodeList: []*pb.Node{node1, node2},
	}
	resp, err := statdb.UpdateBatch(ctx, updateBatchReq)
	assert.NoError(t, err)

	// node1: audit success + uptime failure; node2: audit success only.
	_, _, newAuditRatio1 := getRatio(int(auditSuccessCount1+1), int(totalAuditCount1+1))
	_, _, newUptimeRatio1 := getRatio(int(uptimeSuccessCount1), int(totalUptimeCount1+1))
	_, _, newAuditRatio2 := getRatio(int(auditSuccessCount2+1), int(totalAuditCount2+1))
	stats1 := resp.StatsList[0]
	stats2 := resp.StatsList[1]
	assert.EqualValues(t, newAuditRatio1, stats1.AuditSuccessRatio)
	assert.EqualValues(t, newUptimeRatio1, stats1.UptimeRatio)
	assert.EqualValues(t, newAuditRatio2, stats2.AuditSuccessRatio)
	assert.EqualValues(t, uptimeRatio2, stats2.UptimeRatio)
}
// TestUpdateBatchDoesNotExist verifies UpdateBatch does not error when one
// of the nodes in the batch has no row (failures are collected, not fatal).
func TestUpdateBatchDoesNotExist(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID1 := teststorj.NodeIDFromString("testnodeid1")
	nodeID2 := teststorj.NodeIDFromString("testnodeid2")

	auditSuccessCount1, totalAuditCount1, auditRatio1 := getRatio(4, 10)
	uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1 := getRatio(8, 25)
	err = createNode(ctx, db, nodeID1, auditSuccessCount1, totalAuditCount1, auditRatio1,
		uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1)
	assert.NoError(t, err)

	// nodeID2 intentionally has no row.
	node1 := &pb.Node{
		Id:                 nodeID1,
		UpdateAuditSuccess: true,
		AuditSuccess:       true,
		UpdateUptime:       true,
		IsUp:               false,
	}
	node2 := &pb.Node{
		Id:                 nodeID2,
		UpdateAuditSuccess: true,
		AuditSuccess:       true,
		UpdateUptime:       false,
	}
	updateBatchReq := &pb.UpdateBatchRequest{
		NodeList: []*pb.Node{node1, node2},
	}
	_, err = statdb.UpdateBatch(ctx, updateBatchReq)
	assert.NoError(t, err)
}
// TestUpdateBatchEmpty verifies UpdateBatch on an empty node list succeeds
// and returns an empty stats list.
func TestUpdateBatchEmpty(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID1 := teststorj.NodeIDFromString("testnodeid1")

	auditSuccessCount1, totalAuditCount1, auditRatio1 := getRatio(4, 10)
	uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1 := getRatio(8, 25)
	err = createNode(ctx, db, nodeID1, auditSuccessCount1, totalAuditCount1, auditRatio1,
		uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1)
	assert.NoError(t, err)

	updateBatchReq := &pb.UpdateBatchRequest{
		NodeList: []*pb.Node{},
	}
	resp, err := statdb.UpdateBatch(ctx, updateBatchReq)
	assert.NoError(t, err)
	assert.Equal(t, len(resp.StatsList), 0)
}
// TestCreateEntryIfNotExists verifies the call leaves an existing node's
// stats untouched and creates a zeroed entry for an unknown node.
func TestCreateEntryIfNotExists(t *testing.T) {
	dbPath := getDBPath()
	statdb, db, err := getServerAndDB(dbPath)
	assert.NoError(t, err)

	nodeID1 := teststorj.NodeIDFromString("testnodeid1")
	nodeID2 := teststorj.NodeIDFromString("testnodeid2")

	auditSuccessCount1, totalAuditCount1, auditRatio1 := getRatio(4, 10)
	uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1 := getRatio(8, 25)
	err = createNode(ctx, db, nodeID1, auditSuccessCount1, totalAuditCount1, auditRatio1,
		uptimeSuccessCount1, totalUptimeCount1, uptimeRatio1)
	assert.NoError(t, err)

	// Existing node: stats must be preserved.
	node1 := &pb.Node{Id: nodeID1}
	createIfNotExistsReq1 := &pb.CreateEntryIfNotExistsRequest{
		Node: node1,
	}
	_, err = statdb.CreateEntryIfNotExists(ctx, createIfNotExistsReq1)
	assert.NoError(t, err)

	nodeInfo1, err := db.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID1.Bytes()))
	assert.NoError(t, err)
	assert.EqualValues(t, nodeID1.Bytes(), nodeInfo1.Id)
	assert.EqualValues(t, auditRatio1, nodeInfo1.AuditSuccessRatio)
	assert.EqualValues(t, uptimeRatio1, nodeInfo1.UptimeRatio)

	// Unknown node: a zeroed row must be created.
	node2 := &pb.Node{Id: nodeID2}
	createIfNotExistsReq2 := &pb.CreateEntryIfNotExistsRequest{
		Node: node2,
	}
	_, err = statdb.CreateEntryIfNotExists(ctx, createIfNotExistsReq2)
	assert.NoError(t, err)

	nodeInfo2, err := db.Get_Node_By_Id(ctx, dbx.Node_Id(nodeID2.Bytes()))
	assert.NoError(t, err)
	assert.EqualValues(t, nodeID2.Bytes(), nodeInfo2.Id)
	assert.EqualValues(t, 0, nodeInfo2.AuditSuccessRatio)
	assert.EqualValues(t, 0, nodeInfo2.UptimeRatio)
}
// getDBPath returns a unique shared-cache in-memory sqlite DSN so that each
// test run gets an isolated database.
func getDBPath() string {
	name := rand.Int63()
	return fmt.Sprintf("file:memdb%d?mode=memory&cache=shared", name)
}
// getServerAndDB opens both a StatDB service and a raw dbx handle on the
// same sqlite path, so tests can act through the service and verify
// directly against the database.
func getServerAndDB(path string) (sdb *statdb.StatDB, db *dbx.DB, err error) {
	sdb, err = statdb.NewStatDB("sqlite3", path, zap.NewNop())
	if err != nil {
		return &statdb.StatDB{}, &dbx.DB{}, err
	}
	db, err = dbx.Open("sqlite3", path)
	if err != nil {
		return &statdb.StatDB{}, &dbx.DB{}, err
	}
	return sdb, db, err
}
// createNode inserts a node row directly through dbx with the given
// counters and precomputed ratios, bypassing the StatDB service.
func createNode(ctx context.Context, db *dbx.DB, nodeID storj.NodeID,
	auditSuccessCount, totalAuditCount int64, auditRatio float64,
	uptimeSuccessCount, totalUptimeCount int64, uptimeRatio float64) error {
	_, err := db.Create_Node(
		ctx,
		dbx.Node_Id(nodeID.Bytes()),
		dbx.Node_AuditSuccessCount(auditSuccessCount),
		dbx.Node_TotalAuditCount(totalAuditCount),
		dbx.Node_AuditSuccessRatio(auditRatio),
		dbx.Node_UptimeSuccessCount(uptimeSuccessCount),
		dbx.Node_TotalUptimeCount(totalUptimeCount),
		dbx.Node_UptimeRatio(uptimeRatio),
	)
	return err
}
// getRatio converts the s-of-t counter pair to int64 and computes the
// s/t success ratio.
func getRatio(s, t int) (success, total int64, ratio float64) {
	success, total = int64(s), int64(t)
	ratio = float64(success) / float64(total)
	return success, total, ratio
}

View File

@ -0,0 +1,334 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package statdb_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite/satellitedb"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
var (
	// nodeID is the shared node id threaded through the ordered subtests below.
	nodeID = teststorj.NodeIDFromString("testnodeid")
)
// getRatio converts the s-of-t counter pair to int64 and computes the
// s/t success ratio.
func getRatio(s, t int) (success, total int64, ratio float64) {
	return int64(s), int64(t), float64(s) / float64(t)
}
// TestStatdb runs the statdb.DB contract tests against each database backend
// provided by satellitedbtest.
func TestStatdb(t *testing.T) {
	satellitedbtest.Run(t, func(t *testing.T, db *satellitedb.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()

		testDatabase(ctx, t, db.StatDB())
	})
}
// testDatabase exercises the statdb.DB implementation end to end.
// NOTE(review): the subtests are order-dependent — later subtests assume the
// counter values left behind by earlier ones (e.g. TestUpdateUptimeExists
// expects the 5/11 audit and 8/26 uptime counts produced by
// TestUpdateExists). They must run sequentially against a fresh database.
func testDatabase(ctx context.Context, t *testing.T, sdb statdb.DB) {
	t.Run("TestCreateNewAndWithStats", func(t *testing.T) {
		auditSuccessCount, auditCount, auditSuccessRatio := getRatio(4, 10)
		uptimeSuccessCount, uptimeCount, uptimeRatio := getRatio(8, 25)
		nodeStats := &pb.NodeStats{
			AuditSuccessRatio:  auditSuccessRatio,
			UptimeRatio:        uptimeRatio,
			AuditCount:         auditCount,
			AuditSuccessCount:  auditSuccessCount,
			UptimeCount:        uptimeCount,
			UptimeSuccessCount: uptimeSuccessCount,
		}
		createReq := &statdb.CreateRequest{
			Node:  nodeID,
			Stats: nodeStats,
		}
		resp, err := sdb.Create(ctx, createReq)
		assert.NoError(t, err)
		s := resp.Stats
		assert.EqualValues(t, auditSuccessRatio, s.AuditSuccessRatio)
		assert.EqualValues(t, uptimeRatio, s.UptimeRatio)

		// Read the row back and verify every stored counter.
		getReq := &statdb.GetRequest{
			Node: nodeID,
		}
		getResp, err := sdb.Get(ctx, getReq)
		assert.NoError(t, err)

		assert.EqualValues(t, nodeID, getResp.Stats.NodeId)
		assert.EqualValues(t, auditCount, getResp.Stats.AuditCount)
		assert.EqualValues(t, auditSuccessCount, getResp.Stats.AuditSuccessCount)
		assert.EqualValues(t, auditSuccessRatio, getResp.Stats.AuditSuccessRatio)
		assert.EqualValues(t, uptimeCount, getResp.Stats.UptimeCount)
		assert.EqualValues(t, uptimeSuccessCount, getResp.Stats.UptimeSuccessCount)
		assert.EqualValues(t, uptimeRatio, getResp.Stats.UptimeRatio)
	})

	t.Run("TestCreateExists", func(t *testing.T) {
		// Creating the same node again must fail.
		auditSuccessCount, auditCount, auditSuccessRatio := getRatio(4, 10)
		uptimeSuccessCount, uptimeCount, uptimeRatio := getRatio(8, 25)
		nodeStats := &pb.NodeStats{
			AuditSuccessRatio:  auditSuccessRatio,
			UptimeRatio:        uptimeRatio,
			AuditCount:         auditCount,
			AuditSuccessCount:  auditSuccessCount,
			UptimeCount:        uptimeCount,
			UptimeSuccessCount: uptimeSuccessCount,
		}
		createReq := &statdb.CreateRequest{
			Node:  nodeID,
			Stats: nodeStats,
		}
		_, err := sdb.Create(ctx, createReq)
		assert.Error(t, err)
	})

	t.Run("TestGetDoesNotExist", func(t *testing.T) {
		noNodeID := teststorj.NodeIDFromString("testnoNodeid")
		getReq := &statdb.GetRequest{
			Node: noNodeID,
		}
		_, err := sdb.Get(ctx, getReq)
		assert.Error(t, err)
	})

	t.Run("TestFindInvalidNodes", func(t *testing.T) {
		invalidNodeIDs := teststorj.NodeIDsFromStrings("id1", "id2", "id3", "id4", "id5", "id6", "id7")
		for _, tt := range []struct {
			nodeID             storj.NodeID
			auditSuccessCount  int64
			auditCount         int64
			auditSuccessRatio  float64
			uptimeSuccessCount int64
			uptimeCount        int64
			uptimeRatio        float64
		}{
			{invalidNodeIDs[0], 20, 20, 1, 20, 20, 1},    // good audit success
			{invalidNodeIDs[1], 5, 20, 0.25, 20, 20, 1},  // bad audit success, good uptime
			{invalidNodeIDs[2], 20, 20, 1, 5, 20, 0.25},  // good audit success, bad uptime
			{invalidNodeIDs[3], 0, 0, 0, 20, 20, 1},      // "bad" audit success, no audits
			{invalidNodeIDs[4], 20, 20, 1, 0, 0, 0.25},   // "bad" uptime success, no checks
			{invalidNodeIDs[5], 0, 1, 0, 5, 5, 1},        // bad audit success exactly one audit
			{invalidNodeIDs[6], 0, 20, 0, 20, 20, 1},     // bad ratios, excluded from query
		} {
			nodeStats := &pb.NodeStats{
				AuditSuccessRatio:  tt.auditSuccessRatio,
				UptimeRatio:        tt.uptimeRatio,
				AuditCount:         tt.auditCount,
				AuditSuccessCount:  tt.auditSuccessCount,
				UptimeCount:        tt.uptimeCount,
				UptimeSuccessCount: tt.uptimeSuccessCount,
			}
			createReq := &statdb.CreateRequest{
				Node:  tt.nodeID,
				Stats: nodeStats,
			}

			_, err := sdb.Create(ctx, createReq)
			assert.NoError(t, err)
		}

		findInvalidNodesReq := &statdb.FindInvalidNodesRequest{
			NodeIds: storj.NodeIDList{
				invalidNodeIDs[0], invalidNodeIDs[1],
				invalidNodeIDs[2], invalidNodeIDs[3],
				invalidNodeIDs[4], invalidNodeIDs[5],
			},
			MaxStats: &pb.NodeStats{
				AuditSuccessRatio: 0.5,
				UptimeRatio:       0.5,
			},
		}

		resp, err := sdb.FindInvalidNodes(ctx, findInvalidNodesReq)
		assert.NoError(t, err)

		invalid := resp.InvalidIds

		assert.Contains(t, invalid, invalidNodeIDs[1])
		assert.Contains(t, invalid, invalidNodeIDs[2])
		assert.Contains(t, invalid, invalidNodeIDs[5])
		assert.Len(t, invalid, 3)
	})

	t.Run("TestUpdateExists", func(t *testing.T) {
		// Counters as left by TestCreateNewAndWithStats.
		auditSuccessCount, auditCount, auditSuccessRatio := getRatio(4, 10)
		uptimeSuccessCount, uptimeCount, uptimeRatio := getRatio(8, 25)

		getReq := &statdb.GetRequest{
			Node: nodeID,
		}
		getResp, err := sdb.Get(ctx, getReq)
		assert.NoError(t, err)

		assert.EqualValues(t, nodeID, getResp.Stats.NodeId)
		assert.EqualValues(t, auditCount, getResp.Stats.AuditCount)
		assert.EqualValues(t, auditSuccessCount, getResp.Stats.AuditSuccessCount)
		assert.EqualValues(t, auditSuccessRatio, getResp.Stats.AuditSuccessRatio)
		assert.EqualValues(t, uptimeCount, getResp.Stats.UptimeCount)
		assert.EqualValues(t, uptimeSuccessCount, getResp.Stats.UptimeSuccessCount)
		assert.EqualValues(t, uptimeRatio, getResp.Stats.UptimeRatio)

		updateReq := &statdb.UpdateRequest{
			Node:               nodeID,
			UpdateAuditSuccess: true,
			AuditSuccess:       true,
			UpdateUptime:       true,
			IsUp:               false,
		}
		updResp, err := sdb.Update(ctx, updateReq)
		assert.NoError(t, err)

		_, _, newAuditRatio := getRatio(int(auditSuccessCount+1), int(auditCount+1))
		_, _, newUptimeRatio := getRatio(int(uptimeSuccessCount), int(uptimeCount+1))
		stats := updResp.Stats
		assert.EqualValues(t, newAuditRatio, stats.AuditSuccessRatio)
		assert.EqualValues(t, newUptimeRatio, stats.UptimeRatio)
	})

	t.Run("TestUpdateUptimeExists", func(t *testing.T) {
		// Counters as left by TestUpdateExists.
		auditSuccessCount, auditCount, auditSuccessRatio := getRatio(5, 11)
		uptimeSuccessCount, uptimeCount, uptimeRatio := getRatio(8, 26)

		getReq := &statdb.GetRequest{
			Node: nodeID,
		}
		getResp, err := sdb.Get(ctx, getReq)
		assert.NoError(t, err)

		assert.EqualValues(t, nodeID, getResp.Stats.NodeId)
		assert.EqualValues(t, auditCount, getResp.Stats.AuditCount)
		assert.EqualValues(t, auditSuccessCount, getResp.Stats.AuditSuccessCount)
		assert.EqualValues(t, auditSuccessRatio, getResp.Stats.AuditSuccessRatio)
		assert.EqualValues(t, uptimeCount, getResp.Stats.UptimeCount)
		assert.EqualValues(t, uptimeSuccessCount, getResp.Stats.UptimeSuccessCount)
		assert.EqualValues(t, uptimeRatio, getResp.Stats.UptimeRatio)

		updateReq := &statdb.UpdateUptimeRequest{
			Node: nodeID,
			IsUp: false,
		}
		resp, err := sdb.UpdateUptime(ctx, updateReq)
		assert.NoError(t, err)

		_, _, newUptimeRatio := getRatio(int(uptimeSuccessCount), int(uptimeCount+1))
		stats := resp.Stats
		assert.EqualValues(t, auditSuccessRatio, stats.AuditSuccessRatio)
		assert.EqualValues(t, auditCount, stats.AuditCount)
		assert.EqualValues(t, newUptimeRatio, stats.UptimeRatio)
	})

	t.Run("TestUpdateAuditSuccessExists", func(t *testing.T) {
		// Counters as left by TestUpdateUptimeExists.
		auditSuccessCount, auditCount, auditSuccessRatio := getRatio(5, 11)
		uptimeSuccessCount, uptimeCount, uptimeRatio := getRatio(8, 27)

		getReq := &statdb.GetRequest{
			Node: nodeID,
		}

		getResp, err := sdb.Get(ctx, getReq)
		assert.NoError(t, err)

		assert.EqualValues(t, nodeID, getResp.Stats.NodeId)
		assert.EqualValues(t, auditCount, getResp.Stats.AuditCount)
		assert.EqualValues(t, auditSuccessCount, getResp.Stats.AuditSuccessCount)
		assert.EqualValues(t, auditSuccessRatio, getResp.Stats.AuditSuccessRatio)
		assert.EqualValues(t, uptimeCount, getResp.Stats.UptimeCount)
		assert.EqualValues(t, uptimeSuccessCount, getResp.Stats.UptimeSuccessCount)
		assert.EqualValues(t, uptimeRatio, getResp.Stats.UptimeRatio)

		updateReq := &statdb.UpdateAuditSuccessRequest{
			Node:         nodeID,
			AuditSuccess: false,
		}
		resp, err := sdb.UpdateAuditSuccess(ctx, updateReq)
		assert.NoError(t, err)

		_, _, newAuditRatio := getRatio(int(auditSuccessCount), int(auditCount+1))
		stats := resp.Stats
		assert.EqualValues(t, newAuditRatio, stats.AuditSuccessRatio)
		assert.EqualValues(t, auditCount+1, stats.AuditCount)
		assert.EqualValues(t, uptimeRatio, stats.UptimeRatio)
	})

	t.Run("TestUpdateBatchExists", func(t *testing.T) {
		nodeID1 := teststorj.NodeIDFromString("testnodeid1")
		auditSuccessCount1, auditCount1, auditRatio1 := getRatio(4, 10)
		uptimeSuccessCount1, uptimeCount1, uptimeRatio1 := getRatio(8, 25)
		nodeStats := &pb.NodeStats{
			AuditSuccessCount:  auditSuccessCount1,
			AuditCount:         auditCount1,
			AuditSuccessRatio:  auditRatio1,
			UptimeSuccessCount: uptimeSuccessCount1,
			UptimeCount:        uptimeCount1,
			UptimeRatio:        uptimeRatio1,
		}
		createReq := &statdb.CreateRequest{
			Node:  nodeID1,
			Stats: nodeStats,
		}
		resp, err := sdb.Create(ctx, createReq)
		assert.NoError(t, err)
		s := resp.Stats
		assert.EqualValues(t, auditRatio1, s.AuditSuccessRatio)
		assert.EqualValues(t, uptimeRatio1, s.UptimeRatio)

		nodeID2 := teststorj.NodeIDFromString("testnodeid2")
		auditSuccessCount2, auditCount2, auditRatio2 := getRatio(7, 10)
		uptimeSuccessCount2, uptimeCount2, uptimeRatio2 := getRatio(8, 20)
		nodeStats = &pb.NodeStats{
			AuditSuccessCount:  auditSuccessCount2,
			AuditCount:         auditCount2,
			AuditSuccessRatio:  auditRatio2,
			UptimeSuccessCount: uptimeSuccessCount2,
			UptimeCount:        uptimeCount2,
			UptimeRatio:        uptimeRatio2,
		}
		createReq = &statdb.CreateRequest{
			Node:  nodeID2,
			Stats: nodeStats,
		}
		resp, err = sdb.Create(ctx, createReq)
		assert.NoError(t, err)
		s = resp.Stats
		assert.EqualValues(t, auditRatio2, s.AuditSuccessRatio)
		assert.EqualValues(t, uptimeRatio2, s.UptimeRatio)

		// node1: audit success + uptime failure; node2: audit success only.
		node1 := &statdb.UpdateRequest{
			Node:               nodeID1,
			UpdateAuditSuccess: true,
			AuditSuccess:       true,
			UpdateUptime:       true,
			IsUp:               false,
		}
		node2 := &statdb.UpdateRequest{
			Node:               nodeID2,
			UpdateAuditSuccess: true,
			AuditSuccess:       true,
			UpdateUptime:       false,
		}
		updateBatchReq := &statdb.UpdateBatchRequest{
			NodeList: []*statdb.UpdateRequest{node1, node2},
		}
		batchUpdResp, err := sdb.UpdateBatch(ctx, updateBatchReq)
		assert.NoError(t, err)

		_, _, newAuditRatio1 := getRatio(int(auditSuccessCount1+1), int(auditCount1+1))
		_, _, newUptimeRatio1 := getRatio(int(uptimeSuccessCount1), int(uptimeCount1+1))
		_, _, newAuditRatio2 := getRatio(int(auditSuccessCount2+1), int(auditCount2+1))
		stats1 := batchUpdResp.StatsList[0]
		stats2 := batchUpdResp.StatsList[1]
		assert.EqualValues(t, newAuditRatio1, stats1.AuditSuccessRatio)
		assert.EqualValues(t, newUptimeRatio1, stats1.UptimeRatio)
		assert.EqualValues(t, newAuditRatio2, stats2.AuditSuccessRatio)
		assert.EqualValues(t, uptimeRatio2, stats2.UptimeRatio)
	})
}

View File

@ -10,6 +10,7 @@ import (
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/bwagreement"
"storj.io/storj/pkg/datarepair/irreparable"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/utils"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
@ -53,10 +54,10 @@ func (db *DB) BandwidthAgreement() bwagreement.DB {
// return &pointerDB{db: db.db}
// }
// // StatDB is a getter for StatDB repository
// func (db *DB) StatDB() statdb.DB {
// return &statDB{db: db.db}
// }
// StatDB is a getter for StatDB repository
func (db *DB) StatDB() statdb.DB {
return &statDB{db: db.db}
}
// // OverlayCacheDB is a getter for OverlayCacheDB repository
// func (db *DB) OverlayCacheDB() overlay.DB {

View File

@ -3,6 +3,7 @@
package satellitedb
//go:generate dbx.v1 schema -d postgres -d sqlite3 satellitedb.dbx .
//go:generate dbx.v1 golang -d postgres -d sqlite3 satellitedb.dbx .
import (

View File

@ -112,4 +112,28 @@ read one (
read all (
select raw
where raw.node_id = ?
)
)
// dbx.v1 golang statdb.dbx .
model node (
key id
field id blob
field audit_success_count int64 (updatable)
field total_audit_count int64 (updatable)
field audit_success_ratio float64 (updatable)
field uptime_success_count int64 (updatable)
field total_uptime_count int64 (updatable)
field uptime_ratio float64 (updatable)
field created_at timestamp ( autoinsert )
field updated_at timestamp ( autoinsert, autoupdate )
)
create node ( )
update node ( where node.id = ? )
delete node ( where node.id = ? )
read one (
select node
where node.id = ?
)

File diff suppressed because it is too large Load Diff

View File

@ -14,3 +14,40 @@ CREATE TABLE irreparabledbs (
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
audit_success_ratio double precision NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_ratio double precision NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE raws (
id bigserial NOT NULL,
node_id text NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total bigint NOT NULL,
data_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE rollups (
id bigserial NOT NULL,
node_id text NOT NULL,
start_time timestamp with time zone NOT NULL,
interval bigint NOT NULL,
data_type integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);

View File

@ -14,3 +14,40 @@ CREATE TABLE irreparabledbs (
repair_attempt_count INTEGER NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id BLOB NOT NULL,
audit_success_count INTEGER NOT NULL,
total_audit_count INTEGER NOT NULL,
audit_success_ratio REAL NOT NULL,
uptime_success_count INTEGER NOT NULL,
total_uptime_count INTEGER NOT NULL,
uptime_ratio REAL NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE raws (
id INTEGER NOT NULL,
node_id TEXT NOT NULL,
interval_end_time TIMESTAMP NOT NULL,
data_total INTEGER NOT NULL,
data_type INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE rollups (
id INTEGER NOT NULL,
node_id TEXT NOT NULL,
start_time TIMESTAMP NOT NULL,
interval INTEGER NOT NULL,
data_type INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE timestamps (
name TEXT NOT NULL,
value TIMESTAMP NOT NULL,
PRIMARY KEY ( name )
);

View File

@ -0,0 +1,418 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb
import (
"context"
"database/sql"
"strings"
"github.com/zeebo/errs"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
pb "storj.io/storj/pkg/pb"
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
var (
	mon = monkit.Package() // per-package monkit scope for instrumentation

	errAuditSuccess = errs.Class("statdb audit success error")
	errUptime       = errs.Class("statdb uptime error")
)
// statDB is the satellitedb-backed implementation of statdb.DB,
// persisting node reputation stats through the dbx layer.
type statDB struct {
	db *dbx.DB
}
// Create adds a stats row for the provided storagenode and returns the
// stats that were stored. When createReq.Stats is nil all counters start at
// zero; otherwise the supplied counters are validated (non-negative,
// success <= total) before being persisted.
func (s *statDB) Create(ctx context.Context, createReq *statdb.CreateRequest) (resp *statdb.CreateResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	var (
		totalAuditCount    int64
		auditSuccessCount  int64
		auditSuccessRatio  float64
		totalUptimeCount   int64
		uptimeSuccessCount int64
		uptimeRatio        float64
	)

	stats := createReq.Stats
	if stats != nil {
		totalAuditCount = stats.AuditCount
		auditSuccessCount = stats.AuditSuccessCount
		auditSuccessRatio, err = checkRatioVars(auditSuccessCount, totalAuditCount)
		if err != nil {
			return nil, errAuditSuccess.Wrap(err)
		}

		totalUptimeCount = stats.UptimeCount
		uptimeSuccessCount = stats.UptimeSuccessCount
		uptimeRatio, err = checkRatioVars(uptimeSuccessCount, totalUptimeCount)
		if err != nil {
			return nil, errUptime.Wrap(err)
		}
	}

	node := createReq.Node

	dbNode, err := s.db.Create_Node(
		ctx,
		dbx.Node_Id(node.Bytes()),
		dbx.Node_AuditSuccessCount(auditSuccessCount),
		dbx.Node_TotalAuditCount(totalAuditCount),
		dbx.Node_AuditSuccessRatio(auditSuccessRatio),
		dbx.Node_UptimeSuccessCount(uptimeSuccessCount),
		dbx.Node_TotalUptimeCount(totalUptimeCount),
		dbx.Node_UptimeRatio(uptimeRatio),
	)
	if err != nil {
		// err.Error() may contain % characters: pass it as a plain message,
		// not a format string (status.Errorf would misinterpret it).
		return nil, status.Error(codes.Internal, err.Error())
	}

	nodeStats := &pb.NodeStats{
		NodeId:            node,
		AuditSuccessRatio: dbNode.AuditSuccessRatio,
		AuditCount:        dbNode.TotalAuditCount,
		UptimeRatio:       dbNode.UptimeRatio,
		UptimeCount:       dbNode.TotalUptimeCount,
	}
	return &statdb.CreateResponse{
		Stats: nodeStats,
	}, nil
}
// Get a storagenode's stats from the db, keyed by getReq.Node.
// Returns an Internal gRPC status on any database failure (including
// the row not existing).
func (s *statDB) Get(ctx context.Context, getReq *statdb.GetRequest) (resp *statdb.GetResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	dbNode, err := s.db.Get_Node_By_Id(ctx, dbx.Node_Id(getReq.Node.Bytes()))
	if err != nil {
		// status.Error: err.Error() is not a format string and may contain '%'
		return nil, status.Error(codes.Internal, err.Error())
	}

	nodeStats := &pb.NodeStats{
		NodeId:             getReq.Node,
		AuditCount:         dbNode.TotalAuditCount,
		AuditSuccessCount:  dbNode.AuditSuccessCount,
		AuditSuccessRatio:  dbNode.AuditSuccessRatio,
		UptimeCount:        dbNode.TotalUptimeCount,
		UptimeSuccessCount: dbNode.UptimeSuccessCount,
		UptimeRatio:        dbNode.UptimeRatio,
	}
	return &statdb.GetResponse{
		Stats: nodeStats,
	}, nil
}
// FindInvalidNodes finds a subset of storagenodes that fail to meet minimum reputation requirements.
// A node is invalid when its audit success ratio or uptime ratio falls below
// the thresholds in getReq.MaxStats; nodes with zero audits or uptime checks
// are never reported.
func (s *statDB) FindInvalidNodes(ctx context.Context, getReq *statdb.FindInvalidNodesRequest) (resp *statdb.FindInvalidNodesResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	var invalidIds storj.NodeIDList

	nodeIds := getReq.NodeIds
	maxAuditSuccess := getReq.MaxStats.AuditSuccessRatio
	maxUptime := getReq.MaxStats.UptimeRatio

	rows, err := s.findInvalidNodesQuery(nodeIds, maxAuditSuccess, maxUptime)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = rows.Close()
	}()

	for rows.Next() {
		node := &dbx.Node{}
		err = rows.Scan(&node.Id, &node.TotalAuditCount, &node.TotalUptimeCount, &node.AuditSuccessRatio, &node.UptimeRatio)
		if err != nil {
			return nil, err
		}
		id, err := storj.NodeIDFromBytes(node.Id)
		if err != nil {
			return nil, err
		}
		invalidIds = append(invalidIds, id)
	}
	// surface iteration errors that rows.Next() swallowed by returning false
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return &statdb.FindInvalidNodesResponse{
		InvalidIds: invalidIds,
	}, nil
}
// findInvalidNodesQuery queries for nodes among nodeIds whose audit success
// ratio or uptime ratio is below the given thresholds, considering only nodes
// with at least one audit and one uptime check. The caller owns the returned
// rows and must close them.
func (s *statDB) findInvalidNodesQuery(nodeIds storj.NodeIDList, auditSuccess, uptime float64) (*sql.Rows, error) {
	// guard: strings.Repeat panics on a negative count and "IN ()" is not
	// valid SQL, so an empty id list must be rejected up front
	if len(nodeIds) == 0 {
		return nil, errs.New("no node ids provided")
	}

	args := make([]interface{}, 0, len(nodeIds)+2)
	for _, id := range nodeIds {
		args = append(args, id.Bytes())
	}
	args = append(args, auditSuccess, uptime)

	rows, err := s.db.Query(s.db.Rebind(`SELECT nodes.id, nodes.total_audit_count,
nodes.total_uptime_count, nodes.audit_success_ratio,
nodes.uptime_ratio
FROM nodes
WHERE nodes.id IN (?`+strings.Repeat(", ?", len(nodeIds)-1)+`)
AND nodes.total_audit_count > 0
AND nodes.total_uptime_count > 0
AND (
nodes.audit_success_ratio < ?
OR nodes.uptime_ratio < ?
)`), args...)

	return rows, err
}
// Update a single storagenode's stats in the db. Creates the row first if it
// does not exist, then applies the audit and/or uptime updates selected by
// updateReq.UpdateAuditSuccess / updateReq.UpdateUptime and returns the
// resulting stats.
func (s *statDB) Update(ctx context.Context, updateReq *statdb.UpdateRequest) (resp *statdb.UpdateResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	// ensure a row exists before reading it back for the incremental update
	createIfReq := &statdb.CreateEntryIfNotExistsRequest{
		Node: updateReq.Node,
	}

	_, err = s.CreateEntryIfNotExists(ctx, createIfReq)
	if err != nil {
		return nil, err
	}

	dbNode, err := s.db.Get_Node_By_Id(ctx, dbx.Node_Id(updateReq.Node.Bytes()))
	if err != nil {
		// status.Error: err.Error() is not a format string and may contain '%'
		return nil, status.Error(codes.Internal, err.Error())
	}

	auditSuccessCount := dbNode.AuditSuccessCount
	totalAuditCount := dbNode.TotalAuditCount
	var auditSuccessRatio float64
	uptimeSuccessCount := dbNode.UptimeSuccessCount
	totalUptimeCount := dbNode.TotalUptimeCount
	var uptimeRatio float64

	updateFields := dbx.Node_Update_Fields{}

	if updateReq.UpdateAuditSuccess {
		auditSuccessCount, totalAuditCount, auditSuccessRatio = updateRatioVars(
			updateReq.AuditSuccess,
			auditSuccessCount,
			totalAuditCount,
		)

		updateFields.AuditSuccessCount = dbx.Node_AuditSuccessCount(auditSuccessCount)
		updateFields.TotalAuditCount = dbx.Node_TotalAuditCount(totalAuditCount)
		updateFields.AuditSuccessRatio = dbx.Node_AuditSuccessRatio(auditSuccessRatio)
	}
	if updateReq.UpdateUptime {
		uptimeSuccessCount, totalUptimeCount, uptimeRatio = updateRatioVars(
			updateReq.IsUp,
			uptimeSuccessCount,
			totalUptimeCount,
		)

		updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(uptimeSuccessCount)
		updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)
		updateFields.UptimeRatio = dbx.Node_UptimeRatio(uptimeRatio)
	}

	dbNode, err = s.db.Update_Node_By_Id(ctx, dbx.Node_Id(updateReq.Node.Bytes()), updateFields)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	nodeStats := &pb.NodeStats{
		NodeId:             updateReq.Node,
		AuditCount:         dbNode.TotalAuditCount,
		AuditSuccessCount:  dbNode.AuditSuccessCount,
		AuditSuccessRatio:  dbNode.AuditSuccessRatio,
		UptimeCount:        dbNode.TotalUptimeCount,
		UptimeSuccessCount: dbNode.UptimeSuccessCount,
		UptimeRatio:        dbNode.UptimeRatio,
	}
	return &statdb.UpdateResponse{
		Stats: nodeStats,
	}, nil
}
// UpdateUptime updates a single storagenode's uptime stats in the db,
// incrementing the total (and, when updateReq.IsUp, the success) counter and
// recomputing the uptime ratio. The row must already exist.
func (s *statDB) UpdateUptime(ctx context.Context, updateReq *statdb.UpdateUptimeRequest) (resp *statdb.UpdateUptimeResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	node := updateReq.Node

	dbNode, err := s.db.Get_Node_By_Id(ctx, dbx.Node_Id(node.Bytes()))
	if err != nil {
		// status.Error: err.Error() is not a format string and may contain '%'
		return nil, status.Error(codes.Internal, err.Error())
	}

	uptimeSuccessCount := dbNode.UptimeSuccessCount
	totalUptimeCount := dbNode.TotalUptimeCount
	var uptimeRatio float64

	updateFields := dbx.Node_Update_Fields{}

	uptimeSuccessCount, totalUptimeCount, uptimeRatio = updateRatioVars(
		updateReq.IsUp,
		uptimeSuccessCount,
		totalUptimeCount,
	)

	updateFields.UptimeSuccessCount = dbx.Node_UptimeSuccessCount(uptimeSuccessCount)
	updateFields.TotalUptimeCount = dbx.Node_TotalUptimeCount(totalUptimeCount)
	updateFields.UptimeRatio = dbx.Node_UptimeRatio(uptimeRatio)

	dbNode, err = s.db.Update_Node_By_Id(ctx, dbx.Node_Id(node.Bytes()), updateFields)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	nodeStats := &pb.NodeStats{
		NodeId:             node,
		AuditCount:         dbNode.TotalAuditCount,
		AuditSuccessCount:  dbNode.AuditSuccessCount,
		AuditSuccessRatio:  dbNode.AuditSuccessRatio,
		UptimeCount:        dbNode.TotalUptimeCount,
		UptimeSuccessCount: dbNode.UptimeSuccessCount,
		UptimeRatio:        dbNode.UptimeRatio,
	}
	return &statdb.UpdateUptimeResponse{
		Stats: nodeStats,
	}, nil
}
// UpdateAuditSuccess updates a single storagenode's audit stats in the db,
// incrementing the total (and, when updateReq.AuditSuccess, the success)
// counter and recomputing the audit success ratio. The row must already exist.
func (s *statDB) UpdateAuditSuccess(ctx context.Context, updateReq *statdb.UpdateAuditSuccessRequest) (resp *statdb.UpdateAuditSuccessResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	node := updateReq.Node

	dbNode, err := s.db.Get_Node_By_Id(ctx, dbx.Node_Id(node.Bytes()))
	if err != nil {
		// status.Error: err.Error() is not a format string and may contain '%'
		return nil, status.Error(codes.Internal, err.Error())
	}

	auditSuccessCount := dbNode.AuditSuccessCount
	totalAuditCount := dbNode.TotalAuditCount
	var auditRatio float64

	updateFields := dbx.Node_Update_Fields{}

	auditSuccessCount, totalAuditCount, auditRatio = updateRatioVars(
		updateReq.AuditSuccess,
		auditSuccessCount,
		totalAuditCount,
	)

	updateFields.AuditSuccessCount = dbx.Node_AuditSuccessCount(auditSuccessCount)
	updateFields.TotalAuditCount = dbx.Node_TotalAuditCount(totalAuditCount)
	updateFields.AuditSuccessRatio = dbx.Node_AuditSuccessRatio(auditRatio)

	dbNode, err = s.db.Update_Node_By_Id(ctx, dbx.Node_Id(node.Bytes()), updateFields)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	nodeStats := &pb.NodeStats{
		NodeId:            node,
		AuditSuccessRatio: dbNode.AuditSuccessRatio,
		AuditCount:        dbNode.TotalAuditCount,
		UptimeRatio:       dbNode.UptimeRatio,
		UptimeCount:       dbNode.TotalUptimeCount,
	}
	return &statdb.UpdateAuditSuccessResponse{
		Stats: nodeStats,
	}, nil
}
// UpdateBatch for updating multiple farmers' stats in the db.
// Each entry is applied independently via Update; entries that fail are
// collected in FailedNodes while successful results accumulate in StatsList.
func (s *statDB) UpdateBatch(ctx context.Context, updateBatchReq *statdb.UpdateBatchRequest) (resp *statdb.UpdateBatchResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	var (
		statsList   []*pb.NodeStats
		failedNodes []*statdb.UpdateRequest
	)

	for _, entry := range updateBatchReq.NodeList {
		singleReq := &statdb.UpdateRequest{
			Node:               entry.Node,
			UpdateAuditSuccess: entry.UpdateAuditSuccess,
			AuditSuccess:       entry.AuditSuccess,
			UpdateUptime:       entry.UpdateUptime,
			IsUp:               entry.IsUp,
		}

		singleRes, updateErr := s.Update(ctx, singleReq)
		if updateErr != nil {
			//@TODO ASK s.log.Error(err.Error())
			failedNodes = append(failedNodes, entry)
			continue
		}
		statsList = append(statsList, singleRes.Stats)
	}

	return &statdb.UpdateBatchResponse{
		FailedNodes: failedNodes,
		StatsList:   statsList,
	}, nil
}
// CreateEntryIfNotExists creates a statdb node entry and saves to statdb if it didn't already exist.
// When the row is already present its current stats are returned instead.
func (s *statDB) CreateEntryIfNotExists(ctx context.Context, createIfReq *statdb.CreateEntryIfNotExistsRequest) (resp *statdb.CreateEntryIfNotExistsResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	getRes, err := s.Get(ctx, &statdb.GetRequest{Node: createIfReq.Node})
	// TODO: figure out better way to confirm error is type dbx.ErrorCode_NoRows
	switch {
	case err != nil && strings.Contains(err.Error(), "no rows in result set"):
		// missing row: create it and report the freshly stored stats
		createRes, createErr := s.Create(ctx, &statdb.CreateRequest{Node: createIfReq.Node})
		if createErr != nil {
			return nil, createErr
		}
		return &statdb.CreateEntryIfNotExistsResponse{
			Stats: createRes.Stats,
		}, nil
	case err != nil:
		return nil, err
	}

	return &statdb.CreateEntryIfNotExistsResponse{
		Stats: getRes.Stats,
	}, nil
}
// updateRatioVars increments the total counter, increments the success counter
// when newStatus is true, and returns both new counts along with the
// recomputed success/total ratio.
func updateRatioVars(newStatus bool, successCount, totalCount int64) (int64, int64, float64) {
	updatedTotal := totalCount + 1
	updatedSuccess := successCount
	if newStatus {
		updatedSuccess++
	}
	return updatedSuccess, updatedTotal, float64(updatedSuccess) / float64(updatedTotal)
}
// checkRatioVars validates a success/total counter pair and returns the
// success ratio. Negative counts or success > total are rejected; a zero
// total yields a zero ratio with no error.
func checkRatioVars(successCount, totalCount int64) (ratio float64, err error) {
	switch {
	case successCount < 0:
		return 0, errs.New("success count less than 0")
	case totalCount < 0:
		return 0, errs.New("total count less than 0")
	case successCount > totalCount:
		return 0, errs.New("success count greater than total count")
	case totalCount == 0:
		return 0, nil
	}
	return float64(successCount) / float64(totalCount), nil
}