preparing for use of customtype gogo extension with NodeID type (#693)

* preparing for use of `customtype` gogo extension with `NodeID` type

* review changes

* preparing for use of `customtype` gogo extension with `NodeID` type

* review changes

* wip

* tests passing

* wip fixing tests

* more wip test fixing

* remove NodeIDList from proto files

* linter fixes

* linter fixes

* linter/review fixes

* more freaking linter fixes

* omg just kill me - linterrrrrrrr

* travis linter, i will murder you and your family in your sleep

* goimports everything - burn in hell travis

* goimports update

* go mod tidy
This commit is contained in:
Bryan White 2018-11-29 19:39:27 +01:00 committed by GitHub
parent a21855ac37
commit 2a0c4e60d2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
124 changed files with 2954 additions and 2483 deletions

View File

@ -16,7 +16,8 @@ import (
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/process"
"storj.io/storj/pkg/provider"
@ -108,7 +109,7 @@ type Inspector struct {
// and the overlay cache
func NewInspector(address string) (*Inspector, error) {
ctx := context.Background()
identity, err := node.NewFullIdentity(ctx, 12, 4)
identity, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
return &Inspector{}, ErrIdentity.Wrap(err)
}
@ -173,9 +174,13 @@ func GetBucket(cmd *cobra.Command, args []string) (err error) {
if err != nil {
return ErrInspectorDial.Wrap(err)
}
nodeID, err := storj.NodeIDFromString(args[0])
if err != nil {
return err
}
bucket, err := i.client.GetBucket(context.Background(), &pb.GetBucketRequest{
Id: args[0],
Id: nodeID,
})
if err != nil {
@ -191,6 +196,10 @@ func PingNode(cmd *cobra.Command, args []string) (err error) {
if len(args) < 2 {
return errs.New("Must provide a node ID and address to ping")
}
nodeID, err := storj.NodeIDFromString(args[0])
if err != nil {
return err
}
i, err := NewInspector(*Addr)
if err != nil {
@ -200,7 +209,7 @@ func PingNode(cmd *cobra.Command, args []string) (err error) {
fmt.Printf("Pinging node %s at %s", args[0], args[1])
p, err := i.client.PingNode(context.Background(), &pb.PingNodeRequest{
Id: args[0],
Id: nodeID,
Address: args[1],
})
@ -215,16 +224,19 @@ func GetStats(cmd *cobra.Command, args []string) (err error) {
return ErrInspectorDial.Wrap(err)
}
idStr := args[0]
nodeID, err := storj.NodeIDFromString(args[0])
if err != nil {
return err
}
res, err := i.client.GetStats(context.Background(), &pb.GetStatsRequest{
NodeId: idStr,
NodeId: nodeID,
})
if err != nil {
return ErrRequest.Wrap(err)
}
fmt.Printf("Stats for ID %s:\n", idStr)
fmt.Printf("Stats for ID %s:\n", nodeID)
fmt.Printf("AuditSuccessRatio: %f, UptimeRatio: %f, AuditCount: %d\n",
res.AuditRatio, res.UptimeRatio, res.AuditCount)
return nil
@ -249,15 +261,18 @@ func GetCSVStats(cmd *cobra.Command, args []string) (err error) {
return ErrArgs.Wrap(err)
}
idStr := line[0]
nodeID, err := storj.NodeIDFromString(line[0])
if err != nil {
return err
}
res, err := i.client.GetStats(context.Background(), &pb.GetStatsRequest{
NodeId: idStr,
NodeId: nodeID,
})
if err != nil {
return ErrRequest.Wrap(err)
}
fmt.Printf("Stats for ID %s:\n", idStr)
fmt.Printf("Stats for ID %s:\n", nodeID)
fmt.Printf("AuditSuccessRatio: %f, UptimeRatio: %f, AuditCount: %d\n",
res.AuditRatio, res.UptimeRatio, res.AuditCount)
}
@ -271,7 +286,10 @@ func CreateStats(cmd *cobra.Command, args []string) (err error) {
return ErrInspectorDial.Wrap(err)
}
idStr := args[0]
nodeID, err := storj.NodeIDFromString(args[0])
if err != nil {
return err
}
auditCount, err := strconv.ParseInt(args[1], 10, 64)
if err != nil {
return ErrArgs.New("audit count must be an int")
@ -290,7 +308,7 @@ func CreateStats(cmd *cobra.Command, args []string) (err error) {
}
_, err = i.client.CreateStats(context.Background(), &pb.CreateStatsRequest{
NodeId: idStr,
NodeId: nodeID,
AuditCount: auditCount,
AuditSuccessCount: auditSuccessCount,
UptimeCount: uptimeCount,
@ -300,7 +318,7 @@ func CreateStats(cmd *cobra.Command, args []string) (err error) {
return ErrRequest.Wrap(err)
}
fmt.Printf("Created statdb entry for ID %s\n", idStr)
fmt.Printf("Created statdb entry for ID %s\n", nodeID)
return nil
}
@ -323,7 +341,10 @@ func CreateCSVStats(cmd *cobra.Command, args []string) (err error) {
return ErrArgs.Wrap(err)
}
idStr := line[0]
nodeID, err := storj.NodeIDFromString(line[0])
if err != nil {
return err
}
auditCount, err := strconv.ParseInt(line[1], 10, 64)
if err != nil {
return ErrArgs.New("audit count must be an int")
@ -342,7 +363,7 @@ func CreateCSVStats(cmd *cobra.Command, args []string) (err error) {
}
_, err = i.client.CreateStats(context.Background(), &pb.CreateStatsRequest{
NodeId: idStr,
NodeId: nodeID,
AuditCount: auditCount,
AuditSuccessCount: auditSuccessCount,
UptimeCount: uptimeCount,
@ -352,7 +373,7 @@ func CreateCSVStats(cmd *cobra.Command, args []string) (err error) {
return ErrRequest.Wrap(err)
}
fmt.Printf("Created statdb entry for ID %s\n", idStr)
fmt.Printf("Created statdb entry for ID %s\n", nodeID)
}
return nil
}

View File

@ -11,6 +11,8 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/process"
@ -57,16 +59,20 @@ func cmdList(cmd *cobra.Command, args []string) (err error) {
return err
}
for _, k := range keys {
n, err := c.Get(process.Ctx(cmd), string(k))
nodeIDs, err := storj.NodeIDsFromBytes(keys.ByteSlices())
if err != nil {
return err
}
for _, id := range nodeIDs {
n, err := c.Get(process.Ctx(cmd), id)
if err != nil {
zap.S().Infof("ID: %s; error getting value\n", k)
zap.S().Infof("ID: %s; error getting value\n", id.String())
}
if n != nil {
zap.S().Infof("ID: %s; Address: %s\n", k, n.Address.Address)
zap.S().Infof("ID: %s; Address: %s\n", id.String(), n.Address.Address)
continue
}
zap.S().Infof("ID: %s: nil\n", k)
zap.S().Infof("ID: %s: nil\n", id.String())
}
return nil
@ -89,9 +95,13 @@ func cmdAdd(cmd *cobra.Command, args []string) (err error) {
}
for i, a := range nodes {
id, err := storj.NodeIDFromString(i)
if err != nil {
zap.S().Error(err)
}
zap.S().Infof("adding node ID: %s; Address: %s", i, a)
err := c.Put(i, pb.Node{
Id: i,
err = c.Put(id, pb.Node{
Id: id,
// TODO: NodeType is missing
Address: &pb.NodeAddress{
Transport: 0,

View File

@ -16,6 +16,8 @@ import (
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/auth/grpcauth"
"storj.io/storj/pkg/bwagreement"
dbmanager "storj.io/storj/pkg/bwagreement/database-manager"
@ -188,8 +190,8 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
}
// attributes per uplinkid
summaries := make(map[string]*UplinkSummary)
uplinkIDs := []string{}
summaries := make(map[storj.NodeID]*UplinkSummary)
uplinkIDs := storj.NodeIDList{}
for _, baRow := range baRows {
// deserializing rbad you get payerbwallocation, total & storage node id
@ -204,7 +206,7 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
return err
}
uplinkID := string(pbad.GetUplinkId())
uplinkID := pbad.UplinkId
summary, ok := summaries[uplinkID]
if !ok {
summaries[uplinkID] = &UplinkSummary{}
@ -228,7 +230,7 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
fmt.Fprintln(w, "UplinkID\tTotal\t# Of Transactions\tPUT Action\tGET Action\t")
// populate the row fields
sort.Strings(uplinkIDs)
sort.Sort(uplinkIDs)
for _, uplinkID := range uplinkIDs {
summary := summaries[uplinkID]
fmt.Fprint(w, uplinkID, "\t", summary.TotalBytes, "\t", summary.TotalTransactions, "\t", summary.PutActionCount, "\t", summary.GetActionCount, "\t\n")

View File

@ -14,10 +14,12 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/spf13/cobra"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
psserver "storj.io/storj/pkg/piecestore/psserver"
"storj.io/storj/pkg/piecestore/psserver"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/process"
"storj.io/storj/pkg/provider"
@ -144,8 +146,8 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
}
// attributes per satelliteid
summaries := make(map[string]*SatelliteSummary)
satelliteIDs := []string{}
summaries := make(map[storj.NodeID]*SatelliteSummary)
satelliteIDs := storj.NodeIDList{}
for _, rbaVal := range bwAgreements {
for _, rbaDataVal := range rbaVal {
@ -161,12 +163,11 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
return err
}
satelliteID := string(pbad.GetSatelliteId())
summary, ok := summaries[satelliteID]
summary, ok := summaries[pbad.SatelliteId]
if !ok {
summaries[satelliteID] = &SatelliteSummary{}
satelliteIDs = append(satelliteIDs, satelliteID)
summary = summaries[satelliteID]
summaries[pbad.SatelliteId] = &SatelliteSummary{}
satelliteIDs = append(satelliteIDs, pbad.SatelliteId)
summary = summaries[pbad.SatelliteId]
}
// fill the summary info
@ -187,7 +188,7 @@ func cmdDiag(cmd *cobra.Command, args []string) (err error) {
fmt.Fprintln(w, "SatelliteID\tTotal\t# Of Transactions\tPUT Action\tGET Action\t")
// populate the row fields
sort.Strings(satelliteIDs)
sort.Sort(satelliteIDs)
for _, satelliteID := range satelliteIDs {
summary := summaries[satelliteID]
fmt.Fprint(w, satelliteID, "\t", summary.TotalBytes, "\t", summary.TotalTransactions, "\t", summary.PutActionCount, "\t", summary.GetActionCount, "\t\n")

View File

@ -10,6 +10,8 @@ import (
"google.golang.org/grpc"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/provider"
)
@ -31,7 +33,7 @@ func main() {
if err != nil {
panic(err)
}
dialOption, err := identity.DialOption("")
dialOption, err := identity.DialOption(storj.NodeID{})
if err != nil {
panic(err)
}

View File

@ -28,11 +28,12 @@ var argError = errs.Class("argError")
func main() {
cobra.EnableCommandSorting = false
ca, err := provider.NewTestCA(ctx)
clientIdent, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
log.Fatal(err)
}
identity, err := ca.NewIdentity()
serverIdent, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
log.Fatal(err)
}
@ -44,9 +45,9 @@ func main() {
Address: ":7777",
Transport: 0,
},
Id: "test-node-id-1234567",
Id: serverIdent.ID,
}
tc := transport.NewClient(identity)
tc := transport.NewClient(clientIdent)
psClient, err := psclient.NewPSClient(ctx, tc, n, 0)
if err != nil {
log.Fatalf("could not initialize Client: %s", err)
@ -82,6 +83,11 @@ func main() {
return argError.New(fmt.Sprintf("path (%s) is a directory, not a file", inputfile))
}
satelliteIdent, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
return err
}
var length = fileInfo.Size()
var ttl = time.Now().Add(24 * time.Hour)
@ -91,7 +97,7 @@ func main() {
id := psclient.NewPieceID()
allocationData := &pb.PayerBandwidthAllocation_Data{
SatelliteId: []byte("OhHeyThisIsAnUnrealFakeSatellite"),
SatelliteId: satelliteIdent.ID,
Action: pb.PayerBandwidthAllocation_PUT,
CreatedUnixSec: time.Now().Unix(),
}
@ -154,8 +160,13 @@ func main() {
return err
}
satelliteIdent, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
return err
}
allocationData := &pb.PayerBandwidthAllocation_Data{
SatelliteId: []byte("OhHeyThisIsAnUnrealFakeSatellite"),
SatelliteId: satelliteIdent.ID,
Action: pb.PayerBandwidthAllocation_GET,
CreatedUnixSec: time.Now().Unix(),
}

View File

@ -36,12 +36,7 @@ func main() {
logger, _ := zap.NewDevelopment()
defer printError(logger.Sync)
ca, err := provider.NewTestCA(ctx)
if err != nil {
logger.Error("Failed to create certificate authority: ", zap.Error(err))
os.Exit(1)
}
identity, err := ca.NewIdentity()
identity, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
logger.Error("Failed to create full identity: ", zap.Error(err))
os.Exit(1)

View File

@ -44,17 +44,16 @@ func main() {
logger, _ := zap.NewDevelopment()
defer printError(logger.Sync)
ca, err := provider.NewTestCA(ctx)
if err != nil {
logger.Error("Failed to create certificate authority: ", zap.Error(err))
os.Exit(1)
idents := make([]*provider.FullIdentity, 3)
for i := range idents {
var err error
idents[i], err = provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
logger.Error("Failed to create certificate authority: ", zap.Error(err))
os.Exit(1)
}
}
identity, err := ca.NewIdentity()
if err != nil {
logger.Error("Failed to create full identity: ", zap.Error(err))
os.Exit(1)
}
client, err := sdbclient.NewClient(identity, port, apiKey)
client, err := sdbclient.NewClient(idents[0], port, apiKey)
if err != nil {
logger.Error("Failed to create sdbclient: ", zap.Error(err))
}
@ -63,25 +62,25 @@ func main() {
// Test farmers
farmer1 := proto.Node{
NodeId: []byte("nodeid1"),
Id: idents[1].ID,
UpdateAuditSuccess: false,
UpdateUptime: false,
}
farmer2 := proto.Node{
NodeId: []byte("nodeid2"),
Id: idents[2].ID,
UpdateAuditSuccess: false,
UpdateUptime: false,
}
// Example Creates
err = client.Create(ctx, farmer1.NodeId)
err = client.Create(ctx, farmer1.Id)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("failed to create", zap.Error(err))
os.Exit(1)
}
logger.Info("Farmer 1 created successfully")
err = client.Create(ctx, farmer2.NodeId)
err = client.Create(ctx, farmer2.Id)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("failed to create", zap.Error(err))
os.Exit(1)
@ -94,7 +93,7 @@ func main() {
farmer1.UpdateAuditSuccess = true
farmer1.UpdateUptime = true
nodeStats, err := client.Update(ctx, farmer1.NodeId, farmer1.AuditSuccess, farmer1.IsUp, nil)
nodeStats, err := client.Update(ctx, farmer1.Id, farmer1.AuditSuccess, farmer1.IsUp, nil)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("failed to update", zap.Error(err))
os.Exit(1)
@ -124,7 +123,7 @@ func main() {
}
// Example Get
nodeStats, err = client.Get(ctx, farmer1.NodeId)
nodeStats, err = client.Get(ctx, farmer1.Id)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("failed to update", zap.Error(err))
os.Exit(1)
@ -132,7 +131,7 @@ func main() {
logger.Info("Farmer 1 after Get 1")
printNodeStats(*nodeStats, *logger)
nodeStats, err = client.Get(ctx, farmer2.NodeId)
nodeStats, err = client.Get(ctx, farmer2.Id)
if err != nil || status.Code(err) == codes.Internal {
logger.Error("failed to update", zap.Error(err))
os.Exit(1)

1
go.mod
View File

@ -100,6 +100,7 @@ require (
// force specific versions for minio
require (
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a
github.com/garyburd/redigo v1.0.1-0.20170216214944-0d253a66e6e1 // indirect
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/graphql-go/graphql v0.7.6

2
go.sum
View File

@ -26,6 +26,8 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/cheggaaa/pb v1.0.5-0.20160713104425-73ae1d68fe0b h1:CMRCnhHx4xVxJy+wPsS67xmi9RHGNctLMoVn9Q1Kit8=
github.com/cheggaaa/pb v1.0.5-0.20160713104425-73ae1d68fe0b/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package testidentity
import (
"context"
"storj.io/storj/pkg/provider"
)
// NewTestIdentity is a helper function to generate new node identities with
// correct difficulty and concurrency for use in tests.
func NewTestIdentity() (*provider.FullIdentity, error) {
	// Difficulty 12 / concurrency 4 matches the values used throughout the
	// test suite: low enough to keep generation fast, real enough to
	// exercise the actual CA/identity path.
	ca, err := provider.NewCA(context.Background(), provider.NewCAOptions{
		Difficulty:  12,
		Concurrency: 4,
	})
	if err != nil {
		return nil, err
	}
	identity, err := ca.NewIdentity()
	if err != nil {
		return nil, err
	}
	// err is known to be nil here; return an explicit nil rather than the
	// stale err variable.
	return identity, nil
}
// NewTestCA returns a certificate authority with the default test
// difficulty and concurrency.
func NewTestCA(ctx context.Context) (*provider.FullCertificateAuthority, error) {
	opts := provider.NewCAOptions{
		Difficulty:  12,
		Concurrency: 4,
	}
	return provider.NewCA(ctx, opts)
}

View File

@ -13,6 +13,8 @@ import (
"go.uber.org/zap"
"google.golang.org/grpc"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/auth/grpcauth"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/overlay"
@ -66,7 +68,7 @@ func (planet *Planet) newNode(name string, nodeType pb.NodeType) (*Node, error)
}
node.Info = pb.Node{
Id: node.Identity.ID.String(),
Id: node.Identity.ID,
Type: nodeType,
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
@ -76,14 +78,13 @@ func (planet *Planet) newNode(name string, nodeType pb.NodeType) (*Node, error)
planet.nodes = append(planet.nodes, node)
planet.nodeInfos = append(planet.nodeInfos, node.Info)
planet.nodeLinks = append(planet.nodeLinks, node.Info.Id+":"+node.Listener.Addr().String())
planet.nodeLinks = append(planet.nodeLinks, node.Info.Id.String()+":"+node.Listener.Addr().String())
return node, nil
}
// ID returns node id
// TODO: switch to storj.NodeID
func (node *Node) ID() string { return node.Info.Id }
func (node *Node) ID() storj.NodeID { return node.Info.Id }
// Addr retursn node address
func (node *Node) Addr() string { return node.Info.Address.Address }

View File

@ -141,7 +141,7 @@ func New(t zaptest.TestingT, satelliteCount, storageNodeCount, uplinkCount int)
// init storage nodes
for _, node := range planet.StorageNodes {
storageDir := filepath.Join(planet.directory, node.ID())
storageDir := filepath.Join(planet.directory, node.ID().String())
serverdb, err := psdb.OpenInMemory(context.Background(), storageDir)
if err != nil {

View File

@ -0,0 +1,59 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package teststorj
import (
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// NodeIDFromBytes returns a node ID consisting of the bytes
// and padding to the node ID length.
func NodeIDFromBytes(b []byte) storj.NodeID {
	// The error is deliberately discarded for test convenience; fit
	// produces a slice of exactly the node ID length.
	nodeID, _ := storj.NodeIDFromBytes(fit(b))
	return nodeID
}
// NodeIDFromString returns a node ID consisting of the string's bytes
// and padding to the node ID length.
func NodeIDFromString(s string) storj.NodeID {
	raw := []byte(s)
	return NodeIDFromBytes(raw)
}
// NodeIDsFromBytes returns node IDs consisting of the byte slices
// and padding to the node ID length.
func NodeIDsFromBytes(bs ...[]byte) (ids storj.NodeIDList) {
	for i := 0; i < len(bs); i++ {
		ids = append(ids, NodeIDFromBytes(bs[i]))
	}
	return ids
}
// NodeIDsFromStrings returns node IDs consisting of the strings' bytes
// and padding to the node ID length.
func NodeIDsFromStrings(strs ...string) (ids storj.NodeIDList) {
	for _, str := range strs {
		ids = append(ids, NodeIDFromBytes([]byte(str)))
	}
	return ids
}
// fit pads b with 0xFF bytes up to the node ID length and truncates any
// excess, so arbitrary test byte slices become valid node ID material.
// Replaces the previous one-byte-per-call recursion with a simple loop
// and drops the dead commented-out alternative.
func fit(b []byte) []byte {
	l := len(storj.NodeID{})
	for len(b) < l {
		b = append(b, 255)
	}
	return b[:l]
}
// MockNode returns a pb node whose ID consists of the string
// padded to the node ID length; all other fields are zero values.
func MockNode(s string) *pb.Node {
	return &pb.Node{Id: NodeIDFromString(s)}
}

View File

@ -8,6 +8,7 @@ import (
"time"
"go.uber.org/zap"
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/provider"
)

View File

@ -8,6 +8,7 @@ import (
"time"
"go.uber.org/zap"
dbx "storj.io/storj/pkg/accounting/dbx"
)

View File

@ -8,6 +8,7 @@ import (
"time"
"go.uber.org/zap"
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/overlay"

View File

@ -9,14 +9,14 @@ import (
"github.com/gogo/protobuf/proto"
"go.uber.org/zap"
dbx "storj.io/storj/pkg/accounting/dbx"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/utils"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
@ -96,9 +96,9 @@ func (t *tally) identifyActiveNodes(ctx context.Context) (err error) {
return Error.Wrap(err)
}
pieces := pointer.Remote.RemotePieces
var nodeIDs []dht.NodeID
var nodeIDs storj.NodeIDList
for _, p := range pieces {
nodeIDs = append(nodeIDs, node.IDFromString(p.NodeId))
nodeIDs = append(nodeIDs, p.NodeId)
}
online, err := t.onlineNodes(ctx, nodeIDs)
if err != nil {
@ -112,12 +112,12 @@ func (t *tally) identifyActiveNodes(ctx context.Context) (err error) {
return err
}
func (t *tally) onlineNodes(ctx context.Context, nodeIDs []dht.NodeID) (online []*pb.Node, err error) {
responses, err := t.overlay.BulkLookup(ctx, utils.NodeIDsToLookupRequests(nodeIDs))
func (t *tally) onlineNodes(ctx context.Context, nodeIDs storj.NodeIDList) (online []*pb.Node, err error) {
responses, err := t.overlay.BulkLookup(ctx, pb.NodeIDsToLookupRequests(nodeIDs))
if err != nil {
return []*pb.Node{}, err
}
nodes := utils.LookupResponsesToNodes(responses)
nodes := pb.LookupResponsesToNodes(responses)
for _, n := range nodes {
if n != nil {
online = append(online, n)
@ -154,13 +154,13 @@ func (t *tally) tallyAtRestStorage(ctx context.Context, pointer *pb.Pointer, nod
}
}
func (t *tally) needToContact(nodeID string) bool {
func (t *tally) needToContact(id storj.NodeID) bool {
//TODO
//check db if node was updated within the last time period
return true
}
func (t *tally) updateGranularTable(nodeID string, pieceSize int64) error {
func (t *tally) updateGranularTable(id storj.NodeID, pieceSize int64) error {
//TODO
return nil
}

View File

@ -13,14 +13,14 @@ import (
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/overlay/mocks"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage/teststore"
)
@ -35,18 +35,17 @@ func TestOnlineNodes(t *testing.T) {
const N = 50
nodes := []*pb.Node{}
nodeIDs := []dht.NodeID{}
nodeIDs := storj.NodeIDList{}
expectedOnline := []*pb.Node{}
for i := 0; i < N; i++ {
str := strconv.Itoa(i)
n := &pb.Node{Id: str, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: str}}
nodeID := teststorj.NodeIDFromString(strconv.Itoa(i))
n := &pb.Node{Id: nodeID, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: ""}}
nodes = append(nodes, n)
if i%(rand.Intn(5)+2) == 0 {
id := node.IDFromString("id" + str)
id := teststorj.NodeIDFromString("id" + nodeID.String())
nodeIDs = append(nodeIDs, id)
} else {
id := node.IDFromString(str)
nodeIDs = append(nodeIDs, id)
nodeIDs = append(nodeIDs, nodeID)
expectedOnline = append(expectedOnline, n)
}
}

View File

@ -16,12 +16,13 @@ import (
"go.uber.org/zap"
"google.golang.org/grpc"
"storj.io/storj/internal/identity"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/pointerdb/pdbclient"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storage/meta"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage/teststore"
@ -68,7 +69,7 @@ func TestAuditSegment(t *testing.T) {
count int
}
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -240,7 +241,7 @@ func makePutRequest(path storj.Path) pb.PutRequest {
var rps []*pb.RemotePiece
rps = append(rps, &pb.RemotePiece{
PieceNum: 1,
NodeId: "testId",
NodeId: teststorj.NodeIDFromString("testId"),
})
pr := pb.PutRequest{
Path: path,

View File

@ -9,6 +9,7 @@ import (
"storj.io/storj/pkg/provider"
proto "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/statdb/sdbclient"
"storj.io/storj/pkg/storj"
)
type reporter interface {
@ -23,11 +24,7 @@ type Reporter struct {
// NewReporter instantiates a reporter
func NewReporter(ctx context.Context, statDBPort string, maxRetries int, apiKey string) (reporter *Reporter, err error) {
ca, err := provider.NewTestCA(ctx)
if err != nil {
return nil, err
}
identity, err := ca.NewIdentity()
identity, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
return nil, err
}
@ -56,10 +53,10 @@ func (reporter *Reporter) RecordAudits(ctx context.Context, nodes []*proto.Node)
return nil
}
func setAuditFailStatus(ctx context.Context, failedNodes []string) (failStatusNodes []*proto.Node) {
func setAuditFailStatus(ctx context.Context, failedNodes storj.NodeIDList) (failStatusNodes []*proto.Node) {
for i := range failedNodes {
setNode := &proto.Node{
NodeId: []byte(failedNodes[i]),
Id: failedNodes[i],
AuditSuccess: false,
IsUp: true,
UpdateAuditSuccess: true,
@ -71,10 +68,10 @@ func setAuditFailStatus(ctx context.Context, failedNodes []string) (failStatusNo
}
// TODO: offline nodes should maybe be marked as failing the audit in the future
func setOfflineStatus(ctx context.Context, offlineNodeIDs []string) (offlineStatusNodes []*proto.Node) {
func setOfflineStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (offlineStatusNodes []*proto.Node) {
for i := range offlineNodeIDs {
setNode := &proto.Node{
NodeId: []byte(offlineNodeIDs[i]),
Id: offlineNodeIDs[i],
IsUp: false,
UpdateUptime: true,
}
@ -83,10 +80,10 @@ func setOfflineStatus(ctx context.Context, offlineNodeIDs []string) (offlineStat
return offlineStatusNodes
}
func setSuccessStatus(ctx context.Context, offlineNodeIDs []string) (successStatusNodes []*proto.Node) {
func setSuccessStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (successStatusNodes []*proto.Node) {
for i := range offlineNodeIDs {
setNode := &proto.Node{
NodeId: []byte(offlineNodeIDs[i]),
Id: offlineNodeIDs[i],
AuditSuccess: true,
IsUp: true,
UpdateAuditSuccess: true,

View File

@ -11,15 +11,14 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/vivint/infectious"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/psclient"
"storj.io/storj/pkg/provider"
sdbproto "storj.io/storj/pkg/statdb/proto"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
"storj.io/storj/pkg/utils"
)
@ -69,8 +68,7 @@ func (d *defaultDownloader) getShare(ctx context.Context, stripeIndex, shareSize
return s, err
}
nodeID := node.IDFromString(fromNode.GetId())
derivedPieceID, err := id.Derive(nodeID.Bytes())
derivedPieceID, err := id.Derive(fromNode.Id.Bytes())
if err != nil {
return s, err
}
@ -121,11 +119,11 @@ func (d *defaultDownloader) DownloadShares(ctx context.Context, pointer *pb.Poin
stripeIndex int, authorization *pb.SignedMessage) (shares map[int]share, nodes map[int]*pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
var nodeIds []dht.NodeID
var nodeIds storj.NodeIDList
pieces := pointer.Remote.GetRemotePieces()
for _, p := range pieces {
nodeIds = append(nodeIds, node.IDFromString(p.GetNodeId()))
nodeIds = append(nodeIds, p.NodeId)
}
// TODO(moby) nodeSlice will not include offline nodes, so overlay should update uptime for these nodes
@ -218,10 +216,10 @@ func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *
return nil, err
}
var offlineNodes []string
var offlineNodes storj.NodeIDList
for pieceNum := range shares {
if shares[pieceNum].Error != nil {
offlineNodes = append(offlineNodes, nodes[pieceNum].GetId())
offlineNodes = append(offlineNodes, nodes[pieceNum].Id)
}
}
@ -232,9 +230,9 @@ func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *
return nil, err
}
var failedNodes []string
var failedNodes storj.NodeIDList
for _, pieceNum := range pieceNums {
failedNodes = append(failedNodes, nodes[pieceNum].GetId())
failedNodes = append(failedNodes, nodes[pieceNum].Id)
}
successNodes := getSuccessNodes(ctx, nodes, failedNodes, offlineNodes)
@ -244,8 +242,8 @@ func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *
}
// getSuccessNodes uses the failed nodes and offline nodes arrays to determine which nodes passed the audit
func getSuccessNodes(ctx context.Context, nodes map[int]*pb.Node, failedNodes, offlineNodes []string) (successNodes []string) {
fails := make(map[string]bool)
func getSuccessNodes(ctx context.Context, nodes map[int]*pb.Node, failedNodes, offlineNodes storj.NodeIDList) (successNodes storj.NodeIDList) {
fails := make(map[storj.NodeID]bool)
for _, fail := range failedNodes {
fails[fail] = true
}
@ -254,15 +252,15 @@ func getSuccessNodes(ctx context.Context, nodes map[int]*pb.Node, failedNodes, o
}
for _, node := range nodes {
if !fails[node.GetId()] {
successNodes = append(successNodes, node.GetId())
if !fails[node.Id] {
successNodes = append(successNodes, node.Id)
}
}
return successNodes
}
// setVerifiedNodes creates a combined array of offline nodes, failed audit nodes, and success nodes with their stats set to the statdb proto Node type
func setVerifiedNodes(ctx context.Context, offlineNodes, failedNodes, successNodes []string) (verifiedNodes []*sdbproto.Node) {
func setVerifiedNodes(ctx context.Context, offlineNodes, failedNodes, successNodes storj.NodeIDList) (verifiedNodes []*sdbproto.Node) {
offlineStatusNodes := setOfflineStatus(ctx, offlineNodes)
failStatusNodes := setAuditFailStatus(ctx, failedNodes)
successStatusNodes := setSuccessStatus(ctx, successNodes)

View File

@ -12,6 +12,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vivint/infectious"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
)
@ -210,7 +212,7 @@ func (m *mockDownloader) DownloadShares(ctx context.Context, pointer *pb.Pointer
for i := 0; i < 30; i++ {
nodes[i] = &pb.Node{
Id: strconv.Itoa(i),
Id: teststorj.NodeIDFromString(strconv.Itoa(i)),
Type: pb.NodeType_STORAGE,
Address: &pb.NodeAddress{
Address: strconv.Itoa(i),
@ -225,7 +227,7 @@ func makePointer(nodeAmt int) *pb.Pointer {
for i := 0; i < nodeAmt; i++ {
rps = append(rps, &pb.RemotePiece{
PieceNum: int32(i),
NodeId: "test" + strconv.Itoa(i),
NodeId: teststorj.NodeIDFromString("test" + strconv.Itoa(i)),
})
}
pr := &pb.Pointer{

View File

@ -11,12 +11,12 @@ import (
"github.com/gtank/cryptopasta"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/provider"
"storj.io/storj/internal/identity"
)
func TestGenerateSignature(t *testing.T) {
ctx := context.Background()
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -41,7 +41,7 @@ func TestGenerateSignature(t *testing.T) {
func TestSignedMessageVerifier(t *testing.T) {
ctx := context.Background()
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)

View File

@ -19,13 +19,14 @@ import (
"github.com/gtank/cryptopasta"
"github.com/stretchr/testify/assert"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc"
"storj.io/storj/internal/identity"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/bwagreement/database-manager"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storj"
)
var (
@ -62,18 +63,18 @@ func NewTestServer(t *testing.T) *TestServer {
}
}
caS, err := provider.NewTestCA(context.Background())
caS, err := testidentity.NewTestCA(context.Background())
check(err)
fiS, err := caS.NewIdentity()
check(err)
so, err := fiS.ServerOption()
check(err)
caC, err := provider.NewTestCA(context.Background())
caC, err := testidentity.NewTestCA(context.Background())
check(err)
fiC, err := caC.NewIdentity()
check(err)
co, err := fiC.DialOption("")
co, err := fiC.DialOption(storj.NodeID{})
check(err)
s := newTestServerStruct(t, fiC.Key)
@ -151,8 +152,8 @@ func generatePayerBandwidthAllocation(action pb.PayerBandwidthAllocation_Action,
// Generate PayerBandwidthAllocation_Data
data, _ := proto.Marshal(
&pb.PayerBandwidthAllocation_Data{
SatelliteId: []byte("SatelliteID"),
UplinkId: []byte("UplinkID"),
SatelliteId: teststorj.NodeIDFromString("SatelliteID"),
UplinkId: teststorj.NodeIDFromString("UplinkID"),
ExpirationUnixSec: time.Now().Add(time.Hour * 24 * 10).Unix(),
SerialNumber: "SerialNumber",
Action: action,
@ -190,7 +191,7 @@ func generateRenterBandwidthAllocation(pba *pb.PayerBandwidthAllocation, uplinkK
&pb.RenterBandwidthAllocation_Data{
PayerAllocation: pba,
PubKey: pubbytes, // TODO: Take this out. It will be kept in a database on the satellite
StorageNodeId: []byte("StorageNodeID"),
StorageNodeId: teststorj.NodeIDFromString("StorageNodeID"),
Total: int64(666),
},
)

View File

@ -11,11 +11,9 @@ import (
"go.uber.org/zap"
"storj.io/storj/pkg/datarepair/queue"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/utils"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
@ -92,9 +90,9 @@ func (c *checker) identifyInjuredSegments(ctx context.Context) (err error) {
c.logger.Debug("no pieces on remote segment")
continue
}
var nodeIDs []dht.NodeID
var nodeIDs storj.NodeIDList
for _, p := range pieces {
nodeIDs = append(nodeIDs, node.IDFromString(p.NodeId))
nodeIDs = append(nodeIDs, p.NodeId)
}
missingPieces, err := c.offlineNodes(ctx, nodeIDs)
if err != nil {
@ -118,12 +116,12 @@ func (c *checker) identifyInjuredSegments(ctx context.Context) (err error) {
}
// returns the indices of offline nodes
func (c *checker) offlineNodes(ctx context.Context, nodeIDs []dht.NodeID) (offline []int32, err error) {
responses, err := c.overlay.BulkLookup(ctx, utils.NodeIDsToLookupRequests(nodeIDs))
func (c *checker) offlineNodes(ctx context.Context, nodeIDs storj.NodeIDList) (offline []int32, err error) {
responses, err := c.overlay.BulkLookup(ctx, pb.NodeIDsToLookupRequests(nodeIDs))
if err != nil {
return []int32{}, err
}
nodes := utils.LookupResponsesToNodes(responses)
nodes := pb.LookupResponsesToNodes(responses)
for i, n := range nodes {
if n == nil {
offline = append(offline, int32(i))

View File

@ -15,14 +15,14 @@ import (
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/datarepair/queue"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/overlay/mocks"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pointerdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage/redis"
"storj.io/storj/storage/redis/redisserver"
"storj.io/storj/storage/testqueue"
@ -43,7 +43,7 @@ func TestIdentifyInjuredSegments(t *testing.T) {
//fill a pointerdb
for i := 0; i < N; i++ {
s := strconv.Itoa(i)
ids := []string{s + "a", s + "b", s + "c", s + "d"}
ids := teststorj.NodeIDsFromStrings([]string{s + "a", s + "b", s + "c", s + "d"}...)
p := &pb.Pointer{
Remote: &pb.RemoteSegment{
@ -71,7 +71,7 @@ func TestIdentifyInjuredSegments(t *testing.T) {
//nodes for cache
selection := rand.Intn(4)
for _, v := range ids[:selection] {
n := &pb.Node{Id: v, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: v}}
n := &pb.Node{Id: v, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: ""}}
nodes = append(nodes, n)
}
pieces := []int32{0, 1, 2, 3}
@ -114,18 +114,16 @@ func TestOfflineNodes(t *testing.T) {
repairQueue := queue.NewQueue(testqueue.New())
const N = 50
nodes := []*pb.Node{}
nodeIDs := []dht.NodeID{}
nodeIDs := storj.NodeIDList{}
expectedOffline := []int32{}
for i := 0; i < N; i++ {
str := strconv.Itoa(i)
n := &pb.Node{Id: str, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: str}}
id := teststorj.NodeIDFromString(strconv.Itoa(i))
n := &pb.Node{Id: id, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: ""}}
nodes = append(nodes, n)
if i%(rand.Intn(5)+2) == 0 {
id := node.IDFromString("id" + str)
nodeIDs = append(nodeIDs, id)
nodeIDs = append(nodeIDs, teststorj.NodeIDFromString("id"+id.String()))
expectedOffline = append(expectedOffline, int32(i))
} else {
id := node.IDFromString(str)
nodeIDs = append(nodeIDs, id)
}
}
@ -155,7 +153,7 @@ func BenchmarkIdentifyInjuredSegments(b *testing.B) {
//fill a pointerdb
for i := 0; i < N; i++ {
s := strconv.Itoa(i)
ids := []string{s + "a", s + "b", s + "c", s + "d"}
ids := teststorj.NodeIDsFromStrings([]string{s + "a", s + "b", s + "c", s + "d"}...)
p := &pb.Pointer{
Remote: &pb.RemoteSegment{
@ -183,7 +181,7 @@ func BenchmarkIdentifyInjuredSegments(b *testing.B) {
//nodes for cache
selection := rand.Intn(4)
for _, v := range ids[:selection] {
n := &pb.Node{Id: v, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: v}}
n := &pb.Node{Id: v, Type: pb.NodeType_STORAGE, Address: &pb.NodeAddress{Address: ""}}
nodes = append(nodes, n)
}
pieces := []int32{0, 1, 2, 3}

View File

@ -11,6 +11,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage/redis"
"storj.io/storj/storage/redis/redisserver"

View File

@ -9,6 +9,7 @@ import (
"github.com/vivint/infectious"
"go.uber.org/zap"
"storj.io/storj/pkg/datarepair/queue"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/miniogw"

View File

@ -8,22 +8,17 @@ import (
"time"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
)
// NodeID is the unique identifier used for Nodes in the DHT
type NodeID interface {
String() string
Bytes() []byte
}
// DHT is the interface for the DHT in the Storj network
type DHT interface {
GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error)
GetNodes(ctx context.Context, start storj.NodeID, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error)
GetRoutingTable(ctx context.Context) (RoutingTable, error)
Bootstrap(ctx context.Context) error
Ping(ctx context.Context, node pb.Node) (pb.Node, error)
FindNode(ctx context.Context, ID NodeID) (pb.Node, error)
FindNode(ctx context.Context, ID storj.NodeID) (pb.Node, error)
Disconnect() error
Seen() []*pb.Node
}
@ -36,24 +31,25 @@ type RoutingTable interface {
CacheSize() int
// Bucket methods
GetBucket(id string) (bucket Bucket, ok bool)
GetBucket(id storj.NodeID) (bucket Bucket, ok bool)
GetBuckets() ([]Bucket, error)
GetBucketIds() (storage.Keys, error)
FindNear(id NodeID, limit int) ([]*pb.Node, error)
FindNear(id storj.NodeID, limit int) ([]*pb.Node, error)
ConnectionSuccess(node *pb.Node) error
ConnectionFailed(node *pb.Node) error
// these are for refreshing
SetBucketTimestamp(id string, now time.Time) error
GetBucketTimestamp(id string, bucket Bucket) (time.Time, error)
SetBucketTimestamp(id []byte, now time.Time) error
GetBucketTimestamp(id []byte, bucket Bucket) (time.Time, error)
}
// Bucket is a set of methods to act on kademlia k buckets
type Bucket interface {
Routing() []pb.Node
Cache() []pb.Node
// TODO: should this be a NodeID?
Midpoint() string
Nodes() []*pb.Node
}

View File

@ -13,7 +13,8 @@ import (
dht "storj.io/storj/pkg/dht"
pb "storj.io/storj/pkg/pb"
"storj.io/storj/storage"
storj "storj.io/storj/pkg/storj"
storage "storj.io/storj/storage"
)
// MockDHT is a mock of DHT interface
@ -64,7 +65,7 @@ func (mr *MockDHTMockRecorder) Disconnect() *gomock.Call {
}
// FindNode mocks base method
func (m *MockDHT) FindNode(arg0 context.Context, arg1 dht.NodeID) (pb.Node, error) {
func (m *MockDHT) FindNode(arg0 context.Context, arg1 storj.NodeID) (pb.Node, error) {
ret := m.ctrl.Call(m, "FindNode", arg0, arg1)
ret0, _ := ret[0].(pb.Node)
ret1, _ := ret[1].(error)
@ -77,7 +78,7 @@ func (mr *MockDHTMockRecorder) FindNode(arg0, arg1 interface{}) *gomock.Call {
}
// GetNodes mocks base method
func (m *MockDHT) GetNodes(arg0 context.Context, arg1 string, arg2 int, arg3 ...pb.Restriction) ([]*pb.Node, error) {
func (m *MockDHT) GetNodes(arg0 context.Context, arg1 storj.NodeID, arg2 int, arg3 ...pb.Restriction) ([]*pb.Node, error) {
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
@ -192,7 +193,7 @@ func (mr *MockRoutingTableMockRecorder) ConnectionSuccess(arg0 interface{}) *gom
}
// FindNear mocks base method
func (m *MockRoutingTable) FindNear(arg0 dht.NodeID, arg1 int) ([]*pb.Node, error) {
func (m *MockRoutingTable) FindNear(arg0 storj.NodeID, arg1 int) ([]*pb.Node, error) {
ret := m.ctrl.Call(m, "FindNear", arg0, arg1)
ret0, _ := ret[0].([]*pb.Node)
ret1, _ := ret[1].(error)
@ -205,7 +206,7 @@ func (mr *MockRoutingTableMockRecorder) FindNear(arg0, arg1 interface{}) *gomock
}
// GetBucket mocks base method
func (m *MockRoutingTable) GetBucket(arg0 string) (dht.Bucket, bool) {
func (m *MockRoutingTable) GetBucket(arg0 storj.NodeID) (dht.Bucket, bool) {
ret := m.ctrl.Call(m, "GetBucket", arg0)
ret0, _ := ret[0].(dht.Bucket)
ret1, _ := ret[1].(bool)
@ -217,8 +218,21 @@ func (mr *MockRoutingTableMockRecorder) GetBucket(arg0 interface{}) *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucket", reflect.TypeOf((*MockRoutingTable)(nil).GetBucket), arg0)
}
// GetBucketIds mocks base method
func (m *MockRoutingTable) GetBucketIds() (storage.Keys, error) {
ret := m.ctrl.Call(m, "GetBucketIds")
ret0, _ := ret[0].(storage.Keys)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetBucketIds indicates an expected call of GetBucketIds
func (mr *MockRoutingTableMockRecorder) GetBucketIds() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIds", reflect.TypeOf((*MockRoutingTable)(nil).GetBucketIds))
}
// GetBucketTimestamp mocks base method
func (m *MockRoutingTable) GetBucketTimestamp(arg0 string, arg1 dht.Bucket) (time.Time, error) {
func (m *MockRoutingTable) GetBucketTimestamp(arg0 []byte, arg1 dht.Bucket) (time.Time, error) {
ret := m.ctrl.Call(m, "GetBucketTimestamp", arg0, arg1)
ret0, _ := ret[0].(time.Time)
ret1, _ := ret[1].(error)
@ -238,10 +252,6 @@ func (m *MockRoutingTable) GetBuckets() ([]dht.Bucket, error) {
return ret0, ret1
}
func (m *MockRoutingTable) GetBucketIds() (storage.Keys, error) {
return nil, nil
}
// GetBuckets indicates an expected call of GetBuckets
func (mr *MockRoutingTableMockRecorder) GetBuckets() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBuckets", reflect.TypeOf((*MockRoutingTable)(nil).GetBuckets))
@ -272,7 +282,7 @@ func (mr *MockRoutingTableMockRecorder) Local() *gomock.Call {
}
// SetBucketTimestamp mocks base method
func (m *MockRoutingTable) SetBucketTimestamp(arg0 string, arg1 time.Time) error {
func (m *MockRoutingTable) SetBucketTimestamp(arg0 []byte, arg1 time.Time) error {
ret := m.ctrl.Call(m, "SetBucketTimestamp", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0

View File

@ -8,10 +8,9 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
@ -48,7 +47,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (err error)
return Error.New("programmer error: statdb responsibility unstarted")
}
id, err := node.NewFullIdentity(ctx, 12, 4)
id, err := provider.NewFullIdentity(ctx, 12, 4)
if err != nil {
return Error.New("error creating inspector identity:")
}

View File

@ -9,7 +9,9 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
@ -57,10 +59,13 @@ func (srv *Server) GetBuckets(ctx context.Context, req *pb.GetBucketsRequest) (*
if err != nil {
return nil, err
}
bytes := b.ByteSlices()
nodeIDs, err := storj.NodeIDsFromBytes(b.ByteSlices())
if err != nil {
return nil, err
}
return &pb.GetBucketsResponse{
Total: int64(len(b)),
Ids: bytes,
Ids: nodeIDs,
}, nil
}
@ -118,9 +123,8 @@ func (srv *Server) PingNode(ctx context.Context, req *pb.PingNodeRequest) (*pb.P
// GetStats returns the stats for a particular node ID
func (srv *Server) GetStats(ctx context.Context, req *pb.GetStatsRequest) (*pb.GetStatsResponse, error) {
nodeID := node.IDFromString(req.NodeId)
getReq := &statsproto.GetRequest{
NodeId: nodeID.Bytes(),
NodeId: req.NodeId,
}
res, err := srv.statdb.Get(ctx, getReq)
if err != nil {
@ -136,9 +140,8 @@ func (srv *Server) GetStats(ctx context.Context, req *pb.GetStatsRequest) (*pb.G
// CreateStats creates a node with specified stats
func (srv *Server) CreateStats(ctx context.Context, req *pb.CreateStatsRequest) (*pb.CreateStatsResponse, error) {
nodeID := node.IDFromString(req.NodeId)
node := &statsproto.Node{
NodeId: nodeID.Bytes(),
Id: req.NodeId,
}
stats := &statsproto.NodeStats{
AuditCount: req.AuditCount,

View File

@ -19,6 +19,7 @@ import (
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/utils"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
@ -54,9 +55,9 @@ type Kademlia struct {
}
// NewKademlia returns a newly configured Kademlia instance
func NewKademlia(id dht.NodeID, nodeType pb.NodeType, bootstrapNodes []pb.Node, address string, metadata *pb.NodeMetadata, identity *provider.FullIdentity, path string, alpha int) (*Kademlia, error) {
func NewKademlia(id storj.NodeID, nodeType pb.NodeType, bootstrapNodes []pb.Node, address string, metadata *pb.NodeMetadata, identity *provider.FullIdentity, path string, alpha int) (*Kademlia, error) {
self := pb.Node{
Id: id.String(),
Id: id,
Type: nodeType,
Address: &pb.NodeAddress{Address: address},
Metadata: metadata,
@ -115,15 +116,21 @@ func (k *Kademlia) Disconnect() error {
// GetNodes returns all nodes from a starting node up to a maximum limit
// stored in the local routing table limiting the result by the specified restrictions
func (k *Kademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
func (k *Kademlia) GetNodes(ctx context.Context, start storj.NodeID, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
nodes := []*pb.Node{}
iteratorMethod := func(it storage.Iterator) error {
var item storage.ListItem
maxLimit := storage.LookupLimit
for ; maxLimit > 0 && it.Next(&item); maxLimit-- {
id := string(item.Key)
node := &pb.Node{}
err := proto.Unmarshal(item.Value, node)
var (
id storj.NodeID
node = &pb.Node{}
)
err := id.Unmarshal(item.Key)
if err != nil {
return Error.Wrap(err)
}
err = proto.Unmarshal(item.Value, node)
if err != nil {
return Error.Wrap(err)
}
@ -139,7 +146,7 @@ func (k *Kademlia) GetNodes(ctx context.Context, start string, limit int, restri
}
err := k.routingTable.iterate(
storage.IterateOptions{
First: storage.Key(start),
First: storage.Key(start.Bytes()),
Recurse: true,
},
iteratorMethod,
@ -165,12 +172,12 @@ func (k *Kademlia) Bootstrap(ctx context.Context) error {
return BootstrapErr.New("no bootstrap nodes provided")
}
return k.lookup(ctx, node.IDFromString(k.routingTable.self.GetId()), discoveryOptions{
return k.lookup(ctx, k.routingTable.self.Id, discoveryOptions{
concurrency: k.alpha, retries: defaultRetries, bootstrap: true, bootstrapNodes: k.bootstrapNodes,
})
}
func (k *Kademlia) lookup(ctx context.Context, target dht.NodeID, opts discoveryOptions) error {
func (k *Kademlia) lookup(ctx context.Context, target storj.NodeID, opts discoveryOptions) error {
kb := k.routingTable.K()
// look in routing table for targetID
nodes, err := k.routingTable.FindNear(target, kb)
@ -209,16 +216,16 @@ func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error) {
// FindNode looks up the provided NodeID first in the local Node, and if it is not found
// begins searching the network for the NodeID. Returns and error if node was not found
func (k *Kademlia) FindNode(ctx context.Context, ID dht.NodeID) (pb.Node, error) {
func (k *Kademlia) FindNode(ctx context.Context, ID storj.NodeID) (pb.Node, error) {
// TODO(coyle): actually Find Node not just perform a lookup
err := k.lookup(ctx, node.IDFromString(k.routingTable.self.GetId()), discoveryOptions{
err := k.lookup(ctx, k.routingTable.self.Id, discoveryOptions{
concurrency: k.alpha, retries: defaultRetries, bootstrap: false,
})
if err != nil {
return pb.Node{}, err
}
// k.routingTable.getNodesFromIDs()
// k.routingTable.getNodesFromIDsBytes()
return pb.Node{}, nil
}
@ -250,7 +257,7 @@ func (k *Kademlia) Seen() []*pb.Node {
nodes := []*pb.Node{}
k.routingTable.mutex.Lock()
for _, v := range k.routingTable.seen {
nodes = append(nodes, proto.Clone(v).(*pb.Node))
nodes = append(nodes, pb.CopyNode(v))
}
k.routingTable.mutex.Unlock()
return nodes

View File

@ -5,6 +5,7 @@
package kademlia
import (
"bytes"
"context"
"io/ioutil"
"net"
@ -14,50 +15,43 @@ import (
"sync/atomic"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/pkg/dht"
"storj.io/storj/internal/identity"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storj"
)
// helper function to generate new node identities with
// correct difficulty and concurrency
func newTestIdentity() (*provider.FullIdentity, error) {
fid, err := node.NewFullIdentity(context.Background(), 12, 4)
return fid, err
}
func TestNewKademlia(t *testing.T) {
rootdir, cleanup := mktempdir(t, "kademlia")
defer cleanup()
cases := []struct {
id dht.NodeID
id storj.NodeID
bn []pb.Node
addr string
expectedErr error
}{
{
id: func() *node.ID {
id, err := newTestIdentity()
id: func() storj.NodeID {
id, err := testidentity.NewTestIdentity()
assert.NoError(t, err)
n := node.ID(id.ID)
return &n
return id.ID
}(),
bn: []pb.Node{pb.Node{Id: "foo"}},
bn: []pb.Node{{Id: teststorj.NodeIDFromString("foo")}},
addr: "127.0.0.1:8080",
},
{
id: func() *node.ID {
id, err := newTestIdentity()
id: func() storj.NodeID {
id, err := testidentity.NewTestIdentity()
assert.NoError(t, err)
n := node.ID(id.ID)
return &n
return id.ID
}(),
bn: []pb.Node{pb.Node{Id: "foo"}},
bn: []pb.Node{{Id: teststorj.NodeIDFromString("foo")}},
addr: "127.0.0.1:8080",
},
}
@ -65,7 +59,7 @@ func TestNewKademlia(t *testing.T) {
for i, v := range cases {
dir := filepath.Join(rootdir, strconv.Itoa(i))
ca, err := provider.NewTestCA(context.Background())
ca, err := testidentity.NewTestCA(context.Background())
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -92,12 +86,12 @@ func TestPeerDiscovery(t *testing.T) {
targetServer, _, targetID, targetAddress := startTestNodeServer()
defer targetServer.Stop()
bootstrapNodes := []pb.Node{pb.Node{Id: bootID.ID.String(), Address: &pb.NodeAddress{Address: bootAddress}}}
bootstrapNodes := []pb.Node{{Id: bootID.ID, Address: &pb.NodeAddress{Address: bootAddress}}}
metadata := &pb.NodeMetadata{
Email: "foo@bar.com",
Wallet: "FarmerWallet",
}
k, err := NewKademlia(dht.NodeID(testID.ID), pb.NodeType_STORAGE, bootstrapNodes, testAddress, metadata, testID, dir, defaultAlpha)
k, err := NewKademlia(testID.ID, pb.NodeType_STORAGE, bootstrapNodes, testAddress, metadata, testID, dir, defaultAlpha)
assert.NoError(t, err)
rt, err := k.GetRoutingTable(context.Background())
assert.NoError(t, err)
@ -109,26 +103,21 @@ func TestPeerDiscovery(t *testing.T) {
}()
cases := []struct {
target dht.NodeID
target storj.NodeID
opts discoveryOptions
expected *pb.Node
expectedErr error
}{
{target: func() *node.ID {
nid := node.ID(targetID.ID)
assert.NoError(t, err)
{target: func() storj.NodeID {
// this is what the bootstrap node returns
mockBootServer.returnValue = []*pb.Node{&pb.Node{Id: targetID.ID.String(), Address: &pb.NodeAddress{Address: targetAddress}}}
return &nid
mockBootServer.returnValue = []*pb.Node{{Id: targetID.ID, Address: &pb.NodeAddress{Address: targetAddress}}}
return targetID.ID
}(),
opts: discoveryOptions{concurrency: 3, bootstrap: true, retries: 1},
expected: &pb.Node{},
expectedErr: nil,
},
{target: func() *node.ID {
n := node.ID(bootID.ID)
return &n
}(),
{target: bootID.ID,
opts: discoveryOptions{concurrency: 3, bootstrap: true, retries: 1},
expected: nil,
expectedErr: nil,
@ -171,13 +160,12 @@ func testNode(t *testing.T, bn []pb.Node) (*Kademlia, *grpc.Server, func()) {
assert.NoError(t, err)
// new config
// new identity
fid, err := newTestIdentity()
id := dht.NodeID(fid.ID)
fid, err := testidentity.NewTestIdentity()
assert.NoError(t, err)
// new kademlia
dir, cleanup := mktempdir(t, "kademlia")
k, err := NewKademlia(id, pb.NodeType_STORAGE, bn, lis.Addr().String(), nil, fid, dir, defaultAlpha)
k, err := NewKademlia(fid.ID, pb.NodeType_STORAGE, bn, lis.Addr().String(), nil, fid, dir, defaultAlpha)
assert.NoError(t, err)
s := node.NewServer(k)
// new ident opts
@ -197,37 +185,40 @@ func testNode(t *testing.T, bn []pb.Node) (*Kademlia, *grpc.Server, func()) {
}
func TestGetNodes(t *testing.T) {
var (
nodeIDA = teststorj.NodeIDFromString("AAAAA")
nodeIDB = teststorj.NodeIDFromString("BBBBB")
nodeIDC = teststorj.NodeIDFromString("CCCCC")
nodeIDD = teststorj.NodeIDFromString("DDDDD")
)
lis, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
srv, _ := newTestServer([]*pb.Node{&pb.Node{Id: "foo"}})
srv, _ := newTestServer([]*pb.Node{{Id: teststorj.NodeIDFromString("foo")}})
go func() { assert.NoError(t, srv.Serve(lis)) }()
defer srv.Stop()
// make new identity
fid, err := newTestIdentity()
fid, err := testidentity.NewTestIdentity()
assert.NoError(t, err)
fid2, err := newTestIdentity()
fid2, err := testidentity.NewTestIdentity()
assert.NoError(t, err)
fid.ID = "AAAAA"
fid2.ID = "BBBBB"
fid.ID = nodeIDA
fid2.ID = nodeIDB
// create two new unique identities
id := node.ID(fid.ID)
id2 := node.ID(fid2.ID)
assert.NotEqual(t, id, id2)
kid := dht.NodeID(fid.ID)
assert.NotEqual(t, fid.ID, fid2.ID)
dir, cleanup := mktempdir(t, "kademlia")
defer cleanup()
k, err := NewKademlia(kid, pb.NodeType_STORAGE, []pb.Node{pb.Node{Id: id2.String(), Address: &pb.NodeAddress{Address: lis.Addr().String()}}}, lis.Addr().String(), nil, fid, dir, defaultAlpha)
k, err := NewKademlia(fid.ID, pb.NodeType_STORAGE, []pb.Node{{Id: fid2.ID, Address: &pb.NodeAddress{Address: lis.Addr().String()}}}, lis.Addr().String(), nil, fid, dir, defaultAlpha)
assert.NoError(t, err)
defer func() {
assert.NoError(t, k.Disconnect())
}()
// add nodes
ids := []string{"AAAAA", "BBBBB", "CCCCC", "DDDDD"}
ids := storj.NodeIDList{nodeIDA, nodeIDB, nodeIDC, nodeIDD}
bw := []int64{1, 2, 3, 4}
disk := []int64{4, 3, 2, 1}
nodes := []*pb.Node{}
@ -246,16 +237,16 @@ func TestGetNodes(t *testing.T) {
cases := []struct {
testID string
start string
start storj.NodeID
limit int
restrictions []pb.Restriction
expected []*pb.Node
}{
{testID: "one",
start: "BBBBB",
start: nodeIDB,
limit: 2,
restrictions: []pb.Restriction{
pb.Restriction{
{
Operator: pb.Restriction_GT,
Operand: pb.Restriction_FREE_BANDWIDTH,
Value: int64(2),
@ -264,15 +255,15 @@ func TestGetNodes(t *testing.T) {
expected: nodes[2:],
},
{testID: "two",
start: "AAAAA",
start: nodeIDA,
limit: 3,
restrictions: []pb.Restriction{
pb.Restriction{
{
Operator: pb.Restriction_GT,
Operand: pb.Restriction_FREE_BANDWIDTH,
Value: int64(2),
},
pb.Restriction{
{
Operator: pb.Restriction_LT,
Operand: pb.Restriction_FREE_DISK,
Value: int64(2),
@ -281,7 +272,7 @@ func TestGetNodes(t *testing.T) {
expected: nodes[3:],
},
{testID: "three",
start: "AAAAA",
start: nodeIDA,
limit: 4,
restrictions: []pb.Restriction{},
expected: nodes,
@ -293,7 +284,7 @@ func TestGetNodes(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, len(c.expected), len(ns))
for i, n := range ns {
assert.True(t, proto.Equal(c.expected[i], n))
assert.True(t, bytes.Equal(c.expected[i].Id.Bytes(), n.Id.Bytes()))
}
})
}
@ -308,7 +299,7 @@ func TestMeetsRestrictions(t *testing.T) {
}{
{testID: "pass one",
r: []pb.Restriction{
pb.Restriction{
{
Operator: pb.Restriction_EQ,
Operand: pb.Restriction_FREE_BANDWIDTH,
Value: int64(1),
@ -323,12 +314,12 @@ func TestMeetsRestrictions(t *testing.T) {
},
{testID: "pass multiple",
r: []pb.Restriction{
pb.Restriction{
{
Operator: pb.Restriction_LTE,
Operand: pb.Restriction_FREE_BANDWIDTH,
Value: int64(2),
},
pb.Restriction{
{
Operator: pb.Restriction_GTE,
Operand: pb.Restriction_FREE_DISK,
Value: int64(2),
@ -344,12 +335,12 @@ func TestMeetsRestrictions(t *testing.T) {
},
{testID: "fail one",
r: []pb.Restriction{
pb.Restriction{
{
Operator: pb.Restriction_LT,
Operand: pb.Restriction_FREE_BANDWIDTH,
Value: int64(2),
},
pb.Restriction{
{
Operator: pb.Restriction_GT,
Operand: pb.Restriction_FREE_DISK,
Value: int64(2),
@ -365,12 +356,12 @@ func TestMeetsRestrictions(t *testing.T) {
},
{testID: "fail multiple",
r: []pb.Restriction{
pb.Restriction{
{
Operator: pb.Restriction_LT,
Operand: pb.Restriction_FREE_BANDWIDTH,
Value: int64(2),
},
pb.Restriction{
{
Operator: pb.Restriction_GT,
Operand: pb.Restriction_FREE_DISK,
Value: int64(2),
@ -408,7 +399,7 @@ func startTestNodeServer() (*grpc.Server, *mockNodesServer, *provider.FullIdenti
return nil, nil, nil, ""
}
ca, err := provider.NewTestCA(context.Background())
ca, err := testidentity.NewTestCA(context.Background())
if err != nil {
return nil, nil, nil, ""
}
@ -434,7 +425,7 @@ func startTestNodeServer() (*grpc.Server, *mockNodesServer, *provider.FullIdenti
}
func newTestServer(nn []*pb.Node) (*grpc.Server, *mockNodesServer) {
ca, err := provider.NewTestCA(context.Background())
ca, err := testidentity.NewTestCA(context.Background())
if err != nil {
return nil, nil
}

View File

@ -9,14 +9,15 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
type peerDiscovery struct {
client node.Client
target dht.NodeID
target storj.NodeID
opts discoveryOptions
cond sync.Cond
@ -26,7 +27,7 @@ type peerDiscovery struct {
// ErrMaxRetries is used when a lookup has been retried the max number of times
var ErrMaxRetries = errs.Class("max retries exceeded for id:")
func newPeerDiscovery(nodes []*pb.Node, client node.Client, target dht.NodeID, opts discoveryOptions) *peerDiscovery {
func newPeerDiscovery(nodes []*pb.Node, client node.Client, target storj.NodeID, opts discoveryOptions) *peerDiscovery {
queue := NewXorQueue(opts.concurrency)
queue.Insert(target, nodes)
@ -65,7 +66,7 @@ func (lookup *peerDiscovery) Run(ctx context.Context) error {
}
next, _ = lookup.queue.Closest()
if !lookup.opts.bootstrap && next.GetId() == lookup.target.String() {
if !lookup.opts.bootstrap && next.Id == lookup.target {
allDone = true
break // closest node is the target and is already in routing table (i.e. no lookup required)
}
@ -80,14 +81,14 @@ func (lookup *peerDiscovery) Run(ctx context.Context) error {
}
lookup.cond.L.Unlock()
neighbors, err := lookup.client.Lookup(ctx, *next, pb.Node{Id: lookup.target.String()})
neighbors, err := lookup.client.Lookup(ctx, *next, pb.Node{Id: lookup.target})
if err != nil {
ok := lookup.queue.Reinsert(lookup.target, next, lookup.opts.retries)
if !ok {
zap.S().Errorf(
"Error occurred during lookup of %s :: %s :: error = %s",
lookup.target.String(),
ErrMaxRetries.New("%s", next.GetId()),
ErrMaxRetries.New("%s", next.Id),
err.Error(),
)
}

View File

@ -8,8 +8,8 @@ import (
"math/big"
"sync"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// XorQueue is a priority queue where the priority is key XOR distance
@ -17,7 +17,7 @@ type XorQueue struct {
maxLen int
mu sync.Mutex
added map[string]int
added map[storj.NodeID]int
items items
}
@ -25,19 +25,19 @@ type XorQueue struct {
func NewXorQueue(size int) *XorQueue {
return &XorQueue{
items: make(items, 0, size),
added: make(map[string]int),
added: make(map[storj.NodeID]int),
maxLen: size,
}
}
// Insert adds Nodes onto the queue
func (x *XorQueue) Insert(target dht.NodeID, nodes []*pb.Node) {
func (x *XorQueue) Insert(target storj.NodeID, nodes []*pb.Node) {
x.mu.Lock()
defer x.mu.Unlock()
unique := nodes[:0]
for _, node := range nodes {
nodeID := node.GetId()
nodeID := node.Id
if _, added := x.added[nodeID]; !added {
x.added[nodeID]++
unique = append(unique, node)
@ -48,11 +48,11 @@ func (x *XorQueue) Insert(target dht.NodeID, nodes []*pb.Node) {
}
// Reinsert adds a Nodes onto the queue if it's been added >= limit times previously
func (x *XorQueue) Reinsert(target dht.NodeID, node *pb.Node, limit int) bool {
func (x *XorQueue) Reinsert(target storj.NodeID, node *pb.Node, limit int) bool {
x.mu.Lock()
defer x.mu.Unlock()
nodeID := node.GetId()
nodeID := node.Id
if x.added[nodeID] >= limit {
return false
}
@ -62,14 +62,21 @@ func (x *XorQueue) Reinsert(target dht.NodeID, node *pb.Node, limit int) bool {
return true
}
func reverse(b []byte) (r []byte) {
for _, v := range b {
r = append([]byte{v}, r...)
}
return r
}
// insert must hold lock while adding
func (x *XorQueue) insert(target dht.NodeID, nodes []*pb.Node) {
targetBytes := new(big.Int).SetBytes(target.Bytes())
func (x *XorQueue) insert(target storj.NodeID, nodes []*pb.Node) {
targetBytes := new(big.Int).SetBytes(reverse(target.Bytes()))
// insert new nodes
for _, node := range nodes {
heap.Push(&x.items, &item{
value: node,
priority: new(big.Int).Xor(targetBytes, new(big.Int).SetBytes([]byte(node.GetId()))),
priority: new(big.Int).Xor(targetBytes, new(big.Int).SetBytes(reverse(node.Id.Bytes()))),
})
}
// resize down if we grew too big

View File

@ -5,45 +5,35 @@ package kademlia
import (
"math/big"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/node"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
)
//BinStr turns a string like '110001' into a string like 'a'
func BinStr(s string) string {
b := []byte(strings.Repeat("0", 8-len(s)%8) + s)
a := make([]byte, len(b)/8)
for i := 0; i < len(b); i++ {
a[i/8] |= ((b[i] - '0') << uint(7-i%8))
}
return string(a)
}
func TestXorQueue(t *testing.T) {
target := node.ID(BinStr("0001"))
testValues := []string{"0011", "0110", "0111", "1000"} //0011, 0110, 0111, 1000
expectedPriority := []int{2, 6, 7, 9} // 0010=>2, 0111=>7, 0110=>6, 1001=>9
expectedIds := []string{"0011", "0111", "0110", "1000"}
target := teststorj.NodeIDFromBytes([]byte{1})
testValues := []byte{3, 6, 7, 8} // 0011, 0110, 0111, 1000
expectedPriority := []int{2, 6, 7, 9} // 0010=>2, 0111=>7, 0110=>6, 1001=>9
expectedIds := []byte{3, 7, 6, 8}
nodes := make([]*pb.Node, len(testValues))
for i, value := range testValues {
nodes[i] = &pb.Node{Id: BinStr(value)}
for i, v := range testValues {
nodes[i] = &pb.Node{Id: teststorj.NodeIDFromBytes([]byte{v})}
}
//populate queue
// populate queue
pq := NewXorQueue(3)
pq.Insert(&target, nodes)
//make sure we remove as many things as the queue should hold
pq.Insert(target, nodes)
// make sure we remove as many things as the queue should hold
assert.Equal(t, pq.Len(), 3)
for i := 0; pq.Len() > 0; i++ {
node, priority := pq.Closest()
assert.Equal(t, *big.NewInt(int64(expectedPriority[i])), priority)
assert.Equal(t, BinStr(expectedIds[i]), node.Id)
assert.Equal(t, []byte{expectedIds[i]}, node.Id[:1])
}
//test that reading beyong length returns nil
// test that reading beyong length returns nil
node, _ := pq.Closest()
assert.Nil(t, node)
}

View File

@ -5,16 +5,14 @@ package kademlia
import (
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
func (rt *RoutingTable) addToReplacementCache(kadBucketID storage.Key, node *pb.Node) {
bucketID := string(kadBucketID)
nodes := rt.replacementCache[bucketID]
func (rt *RoutingTable) addToReplacementCache(kadBucketID bucketID, node *pb.Node) {
nodes := rt.replacementCache[kadBucketID]
nodes = append(nodes, node)
if len(nodes) > rt.rcBucketSize {
copy(nodes, nodes[1:])
nodes = nodes[:len(nodes)-1]
}
rt.replacementCache[bucketID] = nodes
rt.replacementCache[kadBucketID] = nodes
}

View File

@ -8,25 +8,27 @@ import (
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
)
func TestAddToReplacementCache(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte{244, 255})
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromBytes([]byte{244, 255}))
defer cleanup()
kadBucketID := []byte{255, 255}
node1 := mockNode(string([]byte{233, 255}))
kadBucketID := keyToBucketID(teststorj.NodeIDFromBytes([]byte{255, 255}).Bytes())
node1 := teststorj.MockNode(string([]byte{233, 255}))
rt.addToReplacementCache(kadBucketID, node1)
assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[string(kadBucketID)])
kadBucketID2 := []byte{127, 255}
node2 := mockNode(string([]byte{100, 255}))
node3 := mockNode(string([]byte{90, 255}))
node4 := mockNode(string([]byte{80, 255}))
assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[kadBucketID])
kadBucketID2 := keyToBucketID(teststorj.NodeIDFromBytes([]byte{127, 255}).Bytes())
node2 := teststorj.MockNode(string([]byte{100, 255}))
node3 := teststorj.MockNode(string([]byte{90, 255}))
node4 := teststorj.MockNode(string([]byte{80, 255}))
rt.addToReplacementCache(kadBucketID2, node2)
rt.addToReplacementCache(kadBucketID2, node3)
assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[string(kadBucketID)])
assert.Equal(t, []*pb.Node{node2, node3}, rt.replacementCache[string(kadBucketID2)])
assert.Equal(t, []*pb.Node{node1}, rt.replacementCache[kadBucketID])
assert.Equal(t, []*pb.Node{node2, node3}, rt.replacementCache[kadBucketID2])
rt.addToReplacementCache(kadBucketID2, node4)
assert.Equal(t, []*pb.Node{node3, node4}, rt.replacementCache[string(kadBucketID2)])
assert.Equal(t, []*pb.Node{node3, node4}, rt.replacementCache[kadBucketID2])
}

View File

@ -5,7 +5,6 @@ package kademlia
import (
"encoding/binary"
"encoding/hex"
"sync"
"time"
@ -28,6 +27,9 @@ const (
// RoutingErr is the class for all errors pertaining to routing table operations
var RoutingErr = errs.Class("routing table error")
// Bucket IDs exist in the same address space as node IDs
type bucketID [len(storj.NodeID{})]byte
// RoutingTable implements the RoutingTable interface
type RoutingTable struct {
self pb.Node
@ -35,9 +37,8 @@ type RoutingTable struct {
nodeBucketDB storage.KeyValueStore
transport *pb.NodeTransport
mutex *sync.Mutex
seen map[string]*pb.Node
replacementCache map[string][]*pb.Node
idLength int // kbucket and node id bit length (SHA256) = 256
seen map[storj.NodeID]*pb.Node
replacementCache map[bucketID][]*pb.Node
bucketSize int // max number of nodes stored in a kbucket = 20 (k)
rcBucketSize int // replacementCache bucket max length
@ -52,10 +53,9 @@ func NewRoutingTable(localNode pb.Node, kdb, ndb storage.KeyValueStore) (*Routin
transport: &defaultTransport,
mutex: &sync.Mutex{},
seen: make(map[string]*pb.Node),
replacementCache: make(map[string][]*pb.Node),
seen: make(map[storj.NodeID]*pb.Node),
replacementCache: make(map[bucketID][]*pb.Node),
idLength: len(storj.NodeID{}) * 8, // NodeID length in bits
bucketSize: *flagBucketSize,
rcBucketSize: *flagReplacementCacheSize,
}
@ -91,19 +91,15 @@ func (rt *RoutingTable) CacheSize() int {
// GetBucket retrieves the corresponding kbucket from node id
// Note: id doesn't need to be stored at time of search
func (rt *RoutingTable) GetBucket(id string) (bucket dht.Bucket, ok bool) {
i, err := hex.DecodeString(id)
func (rt *RoutingTable) GetBucket(id storj.NodeID) (bucket dht.Bucket, ok bool) {
bID, err := rt.getKBucketID(id)
if err != nil {
return &KBucket{}, false
}
bucketID, err := rt.getKBucketID(i)
if err != nil {
if bID == (bucketID{}) {
return &KBucket{}, false
}
if bucketID == nil {
return &KBucket{}, false
}
unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(bucketID)
unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(bID)
if err != nil {
return &KBucket{}, false
}
@ -118,7 +114,7 @@ func (rt *RoutingTable) GetBuckets() (k []dht.Bucket, err error) {
return bs, RoutingErr.New("could not get bucket ids %s", err)
}
for _, v := range kbuckets {
unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(v)
unmarshaledNodes, err := rt.getUnmarshaledNodesFromBucket(keyToBucketID(v))
if err != nil {
return bs, err
}
@ -139,42 +135,41 @@ func (rt *RoutingTable) GetBucketIds() (storage.Keys, error) {
// FindNear returns the node corresponding to the provided nodeID
// returns all Nodes closest via XOR to the provided nodeID up to the provided limit
// always returns limit + self
func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*pb.Node, error) {
func (rt *RoutingTable) FindNear(id storj.NodeID, limit int) (nodes []*pb.Node, err error) {
// if id is not in the routing table
nodeIDs, err := rt.nodeBucketDB.List(nil, 0)
nodeIDsKeys, err := rt.nodeBucketDB.List(nil, 0)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not get node ids %s", err)
return nodes, RoutingErr.New("could not get node ids %s", err)
}
sortByXOR(nodeIDsKeys, id.Bytes())
if len(nodeIDsKeys) >= limit {
nodeIDsKeys = nodeIDsKeys[:limit]
}
nodeIDs, err := storj.NodeIDsFromBytes(nodeIDsKeys.ByteSlices())
if err != nil {
return nodes, RoutingErr.Wrap(err)
}
sortByXOR(nodeIDs, id.Bytes())
if len(nodeIDs) >= limit {
nodeIDs = nodeIDs[:limit]
}
ids, serializedNodes, err := rt.getNodesFromIDs(nodeIDs)
nodes, err = rt.getNodesFromIDsBytes(nodeIDs)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not get nodes %s", err)
return nodes, RoutingErr.New("could not get nodes %s", err)
}
unmarshaledNodes, err := unmarshalNodes(ids, serializedNodes)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
}
return unmarshaledNodes, nil
return nodes, nil
}
// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
// valid to connect to node without ID but don't store connection
if node.GetId() == "" {
if node.Id == (storj.NodeID{}) {
return nil
}
rt.mutex.Lock()
rt.seen[node.GetId()] = node
rt.seen[node.Id] = node
rt.mutex.Unlock()
v, err := rt.nodeBucketDB.Get(storage.Key(node.Id))
v, err := rt.nodeBucketDB.Get(storage.Key(node.Id.Bytes()))
if err != nil && !storage.ErrKeyNotFound.Has(err) {
return RoutingErr.New("could not get node %s", err)
}
@ -199,12 +194,7 @@ func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
// ConnectionFailed removes a node from the routing table when
// a connection fails for the node on the network
func (rt *RoutingTable) ConnectionFailed(node *pb.Node) error {
nodeID := storage.Key(node.Id)
bucketID, err := rt.getKBucketID(nodeID)
if err != nil {
return RoutingErr.New("could not get k bucket %s", err)
}
err = rt.removeNode(bucketID, nodeID)
err := rt.removeNode(node.Id)
if err != nil {
return RoutingErr.New("could not remove node %s", err)
}
@ -212,10 +202,10 @@ func (rt *RoutingTable) ConnectionFailed(node *pb.Node) error {
}
// SetBucketTimestamp updates the last updated time for a bucket
func (rt *RoutingTable) SetBucketTimestamp(id string, now time.Time) error {
func (rt *RoutingTable) SetBucketTimestamp(bIDBytes []byte, now time.Time) error {
rt.mutex.Lock()
defer rt.mutex.Unlock()
err := rt.createOrUpdateKBucket([]byte(id), now)
err := rt.createOrUpdateKBucket(keyToBucketID(bIDBytes), now)
if err != nil {
return NodeErr.New("could not update bucket timestamp %s", err)
}
@ -223,8 +213,8 @@ func (rt *RoutingTable) SetBucketTimestamp(id string, now time.Time) error {
}
// GetBucketTimestamp retrieves the last updated time for a bucket
func (rt *RoutingTable) GetBucketTimestamp(id string, bucket dht.Bucket) (time.Time, error) {
t, err := rt.kadBucketDB.Get([]byte(id))
func (rt *RoutingTable) GetBucketTimestamp(bIDBytes []byte, bucket dht.Bucket) (time.Time, error) {
t, err := rt.kadBucketDB.Get(bIDBytes)
if err != nil {
return time.Now(), RoutingErr.New("could not get bucket timestamp %s", err)
}

View File

@ -6,11 +6,15 @@ package kademlia
import (
"bytes"
"encoding/binary"
"fmt"
"sort"
"time"
"github.com/gogo/protobuf/proto"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/utils"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
@ -21,23 +25,20 @@ import (
func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
rt.mutex.Lock()
defer rt.mutex.Unlock()
nodeKey := storage.Key(node.Id)
if bytes.Equal(nodeKey, storage.Key(rt.self.Id)) {
nodeIDBytes := node.Id.Bytes()
if bytes.Equal(nodeIDBytes, rt.self.Id.Bytes()) {
err := rt.createOrUpdateKBucket(rt.createFirstBucketID(), time.Now())
if err != nil {
return false, RoutingErr.New("could not create initial K bucket: %s", err)
}
nodeValue, err := marshalNode(*node)
if err != nil {
return false, RoutingErr.New("could not marshal initial node: %s", err)
}
err = rt.putNode(nodeKey, nodeValue)
err = rt.putNode(node)
if err != nil {
return false, RoutingErr.New("could not add initial node to nodeBucketDB: %s", err)
}
return true, nil
}
kadBucketID, err := rt.getKBucketID(nodeKey)
kadBucketID, err := rt.getKBucketID(node.Id)
if err != nil {
return false, RoutingErr.New("could not getKBucketID: %s", err)
}
@ -50,7 +51,7 @@ func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
return false, err
}
withinK, err := rt.nodeIsWithinNearestK(nodeKey)
withinK, err := rt.nodeIsWithinNearestK(node.Id)
if err != nil {
return false, RoutingErr.New("could not determine if node is within k: %s", err)
}
@ -60,16 +61,21 @@ func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
if err != nil {
return false, RoutingErr.New("could not determine leaf depth: %s", err)
}
fmt.Printf("NODEID==%v\n", node.Id.Bytes())
fmt.Printf("BEFORE==%v\n", kadBucketID)
kadBucketID = rt.splitBucket(kadBucketID, depth)
fmt.Printf("AFTER==%v\n", kadBucketID)
err = rt.createOrUpdateKBucket(kadBucketID, time.Now())
if err != nil {
return false, RoutingErr.New("could not split and create K bucket: %s", err)
}
kadBucketID, err = rt.getKBucketID(nodeKey)
kadBucketID, err = rt.getKBucketID(node.Id)
fmt.Printf("NODE BUCKET==%v\n", kadBucketID)
if err != nil {
return false, RoutingErr.New("could not get k bucket Id within add node split bucket checks: %s", err)
}
hasRoom, err = rt.kadBucketHasRoom(kadBucketID)
fmt.Printf("HAS ROOM? %v\n", hasRoom)
if err != nil {
return false, err
}
@ -83,11 +89,7 @@ func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
return false, nil
}
}
nodeValue, err := marshalNode(*node)
if err != nil {
return false, RoutingErr.New("could not marshal node: %s", err)
}
err = rt.putNode(nodeKey, nodeValue)
err = rt.putNode(node)
if err != nil {
return false, RoutingErr.New("could not add node to nodeBucketDB: %s", err)
}
@ -101,59 +103,48 @@ func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
// updateNode will update the node information given that
// the node is already in the routing table.
func (rt *RoutingTable) updateNode(node *pb.Node) error {
marshaledNode, err := marshalNode(*node)
if err != nil {
return err
}
err = rt.putNode(storage.Key(node.Id), marshaledNode)
if err != nil {
if err := rt.putNode(node); err != nil {
return RoutingErr.New("could not update node: %v", err)
}
return nil
}
// removeNode will remove churned nodes and replace those entries with nodes from the replacement cache.
func (rt *RoutingTable) removeNode(kadBucketID storage.Key, nodeID storage.Key) error {
_, err := rt.nodeBucketDB.Get(nodeID)
func (rt *RoutingTable) removeNode(nodeID storj.NodeID) error {
kadBucketID, err := rt.getKBucketID(nodeID)
if err != nil {
return RoutingErr.New("could not get k bucket %s", err)
}
_, err = rt.nodeBucketDB.Get(nodeID.Bytes())
if storage.ErrKeyNotFound.Has(err) {
return nil
} else if err != nil {
return RoutingErr.New("could not get node %s", err)
}
err = rt.nodeBucketDB.Delete(nodeID)
err = rt.nodeBucketDB.Delete(nodeID.Bytes())
if err != nil {
return RoutingErr.New("could not delete node %s", err)
}
nodes := rt.replacementCache[string(kadBucketID)]
nodes := rt.replacementCache[kadBucketID]
if len(nodes) == 0 {
return nil
}
last := nodes[len(nodes)-1]
val, err := marshalNode(*last)
err = rt.putNode(nodes[len(nodes)-1])
if err != nil {
return err
}
err = rt.putNode(storage.Key(last.Id), val)
if err != nil {
return err
}
rt.replacementCache[string(kadBucketID)] = nodes[:len(nodes)-1]
rt.replacementCache[kadBucketID] = nodes[:len(nodes)-1]
return nil
}
// marshalNode: helper, sanitizes Node for db insertion
func marshalNode(node pb.Node) ([]byte, error) {
node.Id = "-"
nodeVal, err := proto.Marshal(&node)
if err != nil {
return nil, RoutingErr.New("could not marshal node: %s", err)
}
return nodeVal, nil
}
// putNode: helper, adds or updates Node and ID to nodeBucketDB
func (rt *RoutingTable) putNode(nodeKey storage.Key, nodeValue storage.Value) error {
err := rt.nodeBucketDB.Put(nodeKey, nodeValue)
func (rt *RoutingTable) putNode(node *pb.Node) error {
v, err := proto.Marshal(node)
if err != nil {
return RoutingErr.Wrap(err)
}
err = rt.nodeBucketDB.Put(node.Id.Bytes(), v)
if err != nil {
return RoutingErr.New("could not add key value pair to nodeBucketDB: %s", err)
}
@ -161,10 +152,10 @@ func (rt *RoutingTable) putNode(nodeKey storage.Key, nodeValue storage.Value) er
}
// createOrUpdateKBucket: helper, adds or updates given kbucket
func (rt *RoutingTable) createOrUpdateKBucket(bucketID storage.Key, now time.Time) error {
func (rt *RoutingTable) createOrUpdateKBucket(bID bucketID, now time.Time) error {
dateTime := make([]byte, binary.MaxVarintLen64)
binary.PutVarint(dateTime, now.UnixNano())
err := rt.kadBucketDB.Put(bucketID, dateTime)
err := rt.kadBucketDB.Put(bID[:], dateTime)
if err != nil {
return RoutingErr.New("could not add or update k bucket: %s", err)
}
@ -173,24 +164,25 @@ func (rt *RoutingTable) createOrUpdateKBucket(bucketID storage.Key, now time.Tim
// getKBucketID: helper, returns the id of the corresponding k bucket given a node id.
// The node doesn't have to be in the routing table at time of search
func (rt *RoutingTable) getKBucketID(nodeID storage.Key) (storage.Key, error) {
func (rt *RoutingTable) getKBucketID(nodeID storj.NodeID) (bucketID, error) {
kadBucketIDs, err := rt.kadBucketDB.List(nil, 0)
if err != nil {
return nil, RoutingErr.New("could not list all k bucket ids: %s", err)
return bucketID{}, RoutingErr.New("could not list all k bucket ids: %s", err)
}
var keys []bucketID
keys = append(keys, bucketID{})
for _, k := range kadBucketIDs {
keys = append(keys, keyToBucketID(k))
}
smallestKey := rt.createZeroAsStorageKey()
var keys storage.Keys
keys = append(keys, smallestKey)
keys = append(keys, kadBucketIDs...)
for i := 0; i < len(keys)-1; i++ {
if bytes.Compare(nodeID, keys[i]) > 0 && bytes.Compare(nodeID, keys[i+1]) <= 0 {
if bytes.Compare(nodeID.Bytes(), keys[i][:]) > 0 && bytes.Compare(nodeID.Bytes(), keys[i+1][:]) <= 0 {
return keys[i+1], nil
}
}
//shouldn't happen BUT return error if no matching kbucket...
return nil, RoutingErr.New("could not find k bucket")
// shouldn't happen BUT return error if no matching kbucket...
return bucketID{}, RoutingErr.New("could not find k bucket")
}
// compareByXor compares left, right xorred by reference
@ -225,17 +217,46 @@ func sortByXOR(nodeIDs storage.Keys, ref storage.Key) {
})
}
// determineFurthestIDWithinK: helper, determines the furthest node within the k closest to local node
func (rt *RoutingTable) determineFurthestIDWithinK(nodeIDs storage.Keys) ([]byte, error) {
sortByXOR(nodeIDs, []byte(rt.self.Id))
if len(nodeIDs) < rt.bucketSize+1 { //adding 1 since we're not including local node in closest k
return nodeIDs[len(nodeIDs)-1], nil
func nodeIDsToKeys(ids storj.NodeIDList) (nodeIDKeys storage.Keys) {
for _, n := range ids {
nodeIDKeys = append(nodeIDKeys, n.Bytes())
}
return nodeIDs[rt.bucketSize], nil
return nodeIDKeys
}
func keysToNodeIDs(keys storage.Keys) (ids storj.NodeIDList, err error) {
var idErrs []error
for _, k := range keys {
id, err := storj.NodeIDFromBytes(k[:])
if err != nil {
idErrs = append(idErrs, err)
}
ids = append(ids, id)
}
if err := utils.CombineErrors(idErrs...); err != nil {
return nil, err
}
return ids, nil
}
func keyToBucketID(key storage.Key) (bID bucketID) {
copy(bID[:], key)
return bID
}
// determineFurthestIDWithinK: helper, determines the furthest node within the k closest to local node
func (rt *RoutingTable) determineFurthestIDWithinK(nodeIDs storj.NodeIDList) (storj.NodeID, error) {
nodeIDKeys := nodeIDsToKeys(nodeIDs)
sortByXOR(nodeIDKeys, rt.self.Id.Bytes())
if len(nodeIDs) < rt.bucketSize+1 { //adding 1 since we're not including local node in closest k
return storj.NodeIDFromBytes(nodeIDKeys[len(nodeIDKeys)-1])
}
return storj.NodeIDFromBytes(nodeIDKeys[rt.bucketSize])
}
// xorTwoIds: helper, finds the xor distance between two byte slices
func xorTwoIds(id []byte, comparisonID []byte) []byte {
func xorTwoIds(id, comparisonID []byte) []byte {
var xorArr []byte
s := len(id)
if s > len(comparisonID) {
@ -250,22 +271,25 @@ func xorTwoIds(id []byte, comparisonID []byte) []byte {
}
// nodeIsWithinNearestK: helper, returns true if the node in question is within the nearest k from local node
func (rt *RoutingTable) nodeIsWithinNearestK(nodeID storage.Key) (bool, error) {
nodes, err := rt.nodeBucketDB.List(nil, 0)
func (rt *RoutingTable) nodeIsWithinNearestK(nodeID storj.NodeID) (bool, error) {
nodeKeys, err := rt.nodeBucketDB.List(nil, 0)
if err != nil {
return false, RoutingErr.New("could not get nodes: %s", err)
}
nodeCount := len(nodes)
nodeCount := len(nodeKeys)
if nodeCount < rt.bucketSize+1 { //adding 1 since we're not including local node in closest k
return true, nil
}
furthestIDWithinK, err := rt.determineFurthestIDWithinK(nodes)
nodeIDs, err := keysToNodeIDs(nodeKeys)
if err != nil {
return false, RoutingErr.Wrap(err)
}
furthestIDWithinK, err := rt.determineFurthestIDWithinK(nodeIDs)
if err != nil {
return false, RoutingErr.New("could not determine furthest id within k: %s", err)
}
localNodeID := rt.self.Id
existingXor := xorTwoIds(furthestIDWithinK, []byte(localNodeID))
newXor := xorTwoIds(nodeID, []byte(localNodeID))
existingXor := xorTwoIds(furthestIDWithinK.Bytes(), rt.self.Id.Bytes())
newXor := xorTwoIds(nodeID.Bytes(), rt.self.Id.Bytes())
if bytes.Compare(newXor, existingXor) < 0 {
return true, nil
}
@ -273,18 +297,17 @@ func (rt *RoutingTable) nodeIsWithinNearestK(nodeID storage.Key) (bool, error) {
}
// kadBucketContainsLocalNode returns true if the kbucket in question contains the local node
func (rt *RoutingTable) kadBucketContainsLocalNode(bucketID storage.Key) (bool, error) {
key := storage.Key(rt.self.Id)
bucket, err := rt.getKBucketID(key)
func (rt *RoutingTable) kadBucketContainsLocalNode(queryID bucketID) (bool, error) {
bID, err := rt.getKBucketID(rt.self.Id)
if err != nil {
return false, err
}
return bytes.Equal(bucket, bucketID), nil
return bytes.Equal(queryID[:], bID[:]), nil
}
// kadBucketHasRoom: helper, returns true if it has fewer than k nodes
func (rt *RoutingTable) kadBucketHasRoom(bucketID storage.Key) (bool, error) {
nodes, err := rt.getNodeIDsWithinKBucket(bucketID)
func (rt *RoutingTable) kadBucketHasRoom(bID bucketID) (bool, error) {
nodes, err := rt.getNodeIDsWithinKBucket(bID)
if err != nil {
return false, err
}
@ -295,129 +318,111 @@ func (rt *RoutingTable) kadBucketHasRoom(bucketID storage.Key) (bool, error) {
}
// getNodeIDsWithinKBucket: helper, returns a collection of all the node ids contained within the kbucket
func (rt *RoutingTable) getNodeIDsWithinKBucket(bucketID storage.Key) (storage.Keys, error) {
endpoints, err := rt.getKBucketRange(bucketID)
func (rt *RoutingTable) getNodeIDsWithinKBucket(bID bucketID) (storj.NodeIDList, error) {
endpoints, err := rt.getKBucketRange(bID)
if err != nil {
return nil, err
}
left := endpoints[0]
right := endpoints[1]
var nodeIDs storage.Keys
allNodeIDs, err := rt.nodeBucketDB.List(nil, 0)
var nodeIDsBytes [][]byte
allNodeIDsBytes, err := rt.nodeBucketDB.List(nil, 0)
if err != nil {
return nil, RoutingErr.New("could not list nodes %s", err)
}
for _, v := range allNodeIDs {
if (bytes.Compare(v, left) > 0) && (bytes.Compare(v, right) <= 0) {
nodeIDs = append(nodeIDs, v)
if len(nodeIDs) == rt.bucketSize {
for _, v := range allNodeIDsBytes {
if (bytes.Compare(v, left[:]) > 0) && (bytes.Compare(v, right[:]) <= 0) {
nodeIDsBytes = append(nodeIDsBytes, v)
if len(nodeIDsBytes) == rt.bucketSize {
break
}
}
}
if len(nodeIDs) > 0 {
nodeIDs, err := storj.NodeIDsFromBytes(nodeIDsBytes)
if err != nil {
return nil, err
}
if len(nodeIDsBytes) > 0 {
return nodeIDs, nil
}
return nil, nil
}
// getNodesFromIDs: helper, returns array of encoded nodes from node ids
func (rt *RoutingTable) getNodesFromIDs(nodeIDs storage.Keys) (storage.Keys, []storage.Value, error) {
var nodes []storage.Value
// getNodesFromIDsBytes: helper, returns array of encoded nodes from node ids
func (rt *RoutingTable) getNodesFromIDsBytes(nodeIDs storj.NodeIDList) ([]*pb.Node, error) {
var marshaledNodes []storage.Value
for _, v := range nodeIDs {
n, err := rt.nodeBucketDB.Get(v)
n, err := rt.nodeBucketDB.Get(v.Bytes())
if err != nil {
return nodeIDs, nodes, RoutingErr.New("could not get node id %v, %s", v, err)
return nil, RoutingErr.New("could not get node id %v, %s", v, err)
}
nodes = append(nodes, n)
marshaledNodes = append(marshaledNodes, n)
}
return nodeIDs, nodes, nil
return unmarshalNodes(marshaledNodes)
}
// unmarshalNodes: helper, returns slice of reconstructed node pointers given a map of nodeIDs:serialized nodes
func unmarshalNodes(nodeIDs storage.Keys, nodes []storage.Value) ([]*pb.Node, error) {
if len(nodeIDs) != len(nodes) {
return []*pb.Node{}, RoutingErr.New("length mismatch between nodeIDs and nodes")
}
func unmarshalNodes(nodes []storage.Value) ([]*pb.Node, error) {
var unmarshaled []*pb.Node
for i, n := range nodes {
for _, n := range nodes {
node := &pb.Node{}
err := proto.Unmarshal(n, node)
if err != nil {
return unmarshaled, RoutingErr.New("could not unmarshal node %s", err)
}
node.Id = string(nodeIDs[i])
unmarshaled = append(unmarshaled, node)
}
return unmarshaled, nil
}
// getUnmarshaledNodesFromBucket: helper, gets nodes within kbucket
func (rt *RoutingTable) getUnmarshaledNodesFromBucket(bucketID storage.Key) ([]*pb.Node, error) {
nodeIDs, err := rt.getNodeIDsWithinKBucket(bucketID)
func (rt *RoutingTable) getUnmarshaledNodesFromBucket(bID bucketID) ([]*pb.Node, error) {
nodeIDsBytes, err := rt.getNodeIDsWithinKBucket(bID)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not get nodeIds within kbucket %s", err)
}
ids, serializedNodes, err := rt.getNodesFromIDs(nodeIDs)
nodes, err := rt.getNodesFromIDsBytes(nodeIDsBytes)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not get node values %s", err)
}
unmarshaledNodes, err := unmarshalNodes(ids, serializedNodes)
if err != nil {
return []*pb.Node{}, RoutingErr.New("could not unmarshal nodes %s", err)
}
return unmarshaledNodes, nil
return nodes, nil
}
// getKBucketRange: helper, returns the left and right endpoints of the range of node ids contained within the bucket
func (rt *RoutingTable) getKBucketRange(bucketID storage.Key) (storage.Keys, error) {
key := bucketID
kadIDs, err := rt.kadBucketDB.ReverseList(key, 2)
func (rt *RoutingTable) getKBucketRange(bID bucketID) ([]bucketID, error) {
kadIDs, err := rt.kadBucketDB.ReverseList(bID[:], 2)
if err != nil {
return nil, RoutingErr.New("could not reverse list k bucket ids %s", err)
}
coords := make(storage.Keys, 2)
coords := make([]bucketID, 2)
if len(kadIDs) < 2 {
coords[0] = rt.createZeroAsStorageKey()
coords[0] = bucketID{}
} else {
coords[0] = kadIDs[1]
copy(coords[0][:], kadIDs[1])
}
coords[1] = kadIDs[0]
copy(coords[1][:], kadIDs[0])
return coords, nil
}
// createFirstBucketID creates byte slice representing 11..11
func (rt *RoutingTable) createFirstBucketID() []byte {
var id []byte
func (rt *RoutingTable) createFirstBucketID() bucketID {
var id bucketID
x := byte(255)
bytesLength := rt.idLength / 8
for i := 0; i < bytesLength; i++ {
id = append(id, x)
}
return id
}
// createZeroAsStorageKey creates storage Key representation of 00..00
func (rt *RoutingTable) createZeroAsStorageKey() storage.Key {
var id []byte
x := byte(0)
bytesLength := rt.idLength / 8
for i := 0; i < bytesLength; i++ {
id = append(id, x)
for i := 0; i < len(id); i++ {
id[i] = x
}
return id
}
// determineLeafDepth determines the level of the bucket id in question.
// Eg level 0 means there is only 1 bucket, level 1 means the bucket has been split once, and so on
func (rt *RoutingTable) determineLeafDepth(bucketID storage.Key) (int, error) {
bucketRange, err := rt.getKBucketRange(bucketID)
func (rt *RoutingTable) determineLeafDepth(bID bucketID) (int, error) {
bucketRange, err := rt.getKBucketRange(bID)
if err != nil {
return -1, RoutingErr.New("could not get k bucket range %s", err)
}
smaller := bucketRange[0]
diffBit, err := rt.determineDifferingBitIndex(bucketID, smaller)
diffBit, err := rt.determineDifferingBitIndex(bID, smaller)
if err != nil {
return diffBit + 1, RoutingErr.New("could not determine differing bit %s", err)
}
@ -425,19 +430,21 @@ func (rt *RoutingTable) determineLeafDepth(bucketID storage.Key) (int, error) {
}
// determineDifferingBitIndex: helper, returns the last bit differs starting from prefix to suffix
func (rt *RoutingTable) determineDifferingBitIndex(bucketID storage.Key, comparisonID storage.Key) (int, error) {
if bytes.Equal(bucketID, comparisonID) {
func (rt *RoutingTable) determineDifferingBitIndex(bID, comparisonID bucketID) (int, error) {
if bytes.Equal(bID[:], comparisonID[:]) {
return -2, RoutingErr.New("compared two equivalent k bucket ids")
}
if bytes.Equal(comparisonID, rt.createZeroAsStorageKey()) {
emptyBID := bucketID{}
if bytes.Equal(comparisonID[:], emptyBID[:]) {
comparisonID = rt.createFirstBucketID()
}
var differingByteIndex int
var differingByteXor byte
xorArr := xorTwoIds(bucketID, comparisonID)
xorArr := xorTwoIds(bID[:], comparisonID[:])
if bytes.Equal(xorArr, rt.createFirstBucketID()) {
firstBID := rt.createFirstBucketID()
if bytes.Equal(xorArr, firstBID[:]) {
return -1, nil
}
@ -468,12 +475,11 @@ func (rt *RoutingTable) determineDifferingBitIndex(bucketID storage.Key, compari
// splitBucket: helper, returns the smaller of the two new bucket ids
// the original bucket id becomes the greater of the 2 new
func (rt *RoutingTable) splitBucket(bucketID []byte, depth int) []byte {
newID := make([]byte, len(bucketID))
copy(newID, bucketID)
bitIndex := depth
byteIndex := bitIndex / 8
bitInByteIndex := 7 - (bitIndex % 8)
func (rt *RoutingTable) splitBucket(bID bucketID, depth int) bucketID {
var newID bucketID
copy(newID[:], bID[:])
byteIndex := depth / 8
bitInByteIndex := 7 - (depth % 8)
toggle := byte(1 << uint(bitInByteIndex))
newID[byteIndex] ^= toggle
return newID

View File

@ -4,6 +4,8 @@
package kademlia
import (
"bytes"
"fmt"
"math/rand"
"sync"
"testing"
@ -14,7 +16,10 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
"storj.io/storj/storage/storelogger"
"storj.io/storj/storage/teststore"
@ -29,10 +34,9 @@ func newTestRoutingTable(localNode pb.Node) (*RoutingTable, error) {
transport: &defaultTransport,
mutex: &sync.Mutex{},
seen: make(map[string]*pb.Node),
replacementCache: make(map[string][]*pb.Node),
seen: make(map[storj.NodeID]*pb.Node),
replacementCache: make(map[bucketID][]*pb.Node),
idLength: 16,
bucketSize: 6,
rcBucketSize: 2,
}
@ -43,11 +47,11 @@ func newTestRoutingTable(localNode pb.Node) (*RoutingTable, error) {
return rt, nil
}
func createRoutingTable(t *testing.T, localNodeID []byte) (*RoutingTable, func()) {
if localNodeID == nil {
localNodeID = []byte("AA")
func createRoutingTable(t *testing.T, localNodeID storj.NodeID) (*RoutingTable, func()) {
if localNodeID == (storj.NodeID{}) {
localNodeID = teststorj.NodeIDFromString("AA")
}
localNode := pb.Node{Id: string(localNodeID)}
localNode := pb.Node{Id: localNodeID}
rt, err := newTestRoutingTable(localNode)
if err != nil {
@ -62,18 +66,12 @@ func createRoutingTable(t *testing.T, localNodeID []byte) (*RoutingTable, func()
}
}
func mockNode(id string) *pb.Node {
var node pb.Node
node.Id = id
return &node
}
func TestAddNode(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("OO"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("OO"))
defer cleanup()
bucket, err := rt.kadBucketDB.Get(storage.Key([]byte{255, 255}))
assert.NoError(t, err)
assert.NotNil(t, bucket)
// bucket, err := rt.kadBucketDB.Get(storage.Key([]byte{255, 255}))
// assert.NoError(t, err)
// assert.NotNil(t, bucket)
cases := []struct {
testID string
node *pb.Node
@ -82,117 +80,116 @@ func TestAddNode(t *testing.T) {
nodeIDs [][]string
}{
{testID: "PO: add node to unfilled kbucket",
node: mockNode("PO"),
node: teststorj.MockNode("PO"),
added: true,
kadIDs: [][]byte{{255, 255}},
nodeIDs: [][]string{{"OO", "PO"}},
},
{testID: "NO: add node to full kbucket and split",
node: mockNode("NO"),
node: teststorj.MockNode("NO"),
added: true,
kadIDs: [][]byte{{255, 255}},
nodeIDs: [][]string{{"NO", "OO", "PO"}},
},
{testID: "MO",
node: mockNode("MO"),
node: teststorj.MockNode("MO"),
added: true,
kadIDs: [][]byte{{255, 255}},
nodeIDs: [][]string{{"MO", "NO", "OO", "PO"}},
},
{testID: "LO",
node: mockNode("LO"),
node: teststorj.MockNode("LO"),
added: true,
kadIDs: [][]byte{{255, 255}},
nodeIDs: [][]string{{"LO", "MO", "NO", "OO", "PO"}},
},
{testID: "QO",
node: mockNode("QO"),
node: teststorj.MockNode("QO"),
added: true,
kadIDs: [][]byte{{255, 255}},
nodeIDs: [][]string{{"LO", "MO", "NO", "OO", "PO", "QO"}},
},
{testID: "SO: split bucket",
node: mockNode("SO"),
node: teststorj.MockNode("SO"),
added: true,
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "?O",
node: mockNode("?O"),
node: teststorj.MockNode("?O"),
added: true,
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: ">O",
node: mockNode(">O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
nodeIDs: [][]string{{">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
node: teststorj.MockNode(">O"),
added: true,
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}}, nodeIDs: [][]string{{">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "=O",
node: mockNode("=O"),
node: teststorj.MockNode("=O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: ";O",
node: mockNode(";O"),
node: teststorj.MockNode(";O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: ":O",
node: mockNode(":O"),
node: teststorj.MockNode(":O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{":O", ";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "9O",
node: mockNode("9O"),
node: teststorj.MockNode("9O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "8O: should drop",
node: mockNode("8O"),
node: teststorj.MockNode("8O"),
added: false,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "KO",
node: mockNode("KO"),
node: teststorj.MockNode("KO"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "JO",
node: mockNode("JO"),
node: teststorj.MockNode("JO"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO"}, {}, {}},
},
{testID: "]O",
node: mockNode("]O"),
node: teststorj.MockNode("]O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O"}, {}, {}},
},
{testID: "^O",
node: mockNode("^O"),
node: teststorj.MockNode("^O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O", "^O"}, {}, {}},
},
{testID: "_O",
node: mockNode("_O"),
node: teststorj.MockNode("_O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O", "^O", "_O"}, {}, {}},
},
{testID: "@O: split bucket 2",
node: mockNode("@O"),
node: teststorj.MockNode("@O"),
added: true,
kadIDs: [][]byte{[]byte{63, 255}, []byte{71, 255}, []byte{79, 255}, []byte{95, 255}, []byte{127, 255}, []byte{255, 255}},
kadIDs: [][]byte{{63, 255}, {71, 255}, {79, 255}, {95, 255}, {127, 255}, {255, 255}},
nodeIDs: [][]string{{"9O", ":O", ";O", "=O", ">O", "?O"}, {"@O"}, {"JO", "KO", "LO", "MO", "NO", "OO"}, {"PO", "QO", "SO", "]O", "^O", "_O"}, {}, {}},
},
}
@ -204,17 +201,20 @@ func TestAddNode(t *testing.T) {
kadKeys, err := rt.kadBucketDB.List(nil, 0)
assert.NoError(t, err)
for i, v := range kadKeys {
assert.Equal(t, storage.Key(c.kadIDs[i]), v)
a, err := rt.getNodeIDsWithinKBucket(v)
assert.True(t, bytes.Equal(c.kadIDs[i], v[:2]))
ids, err := rt.getNodeIDsWithinKBucket(keyToBucketID(v))
assert.NoError(t, err)
for j, w := range a {
assert.Equal(t, c.nodeIDs[i][j], string(w))
// fmt.Printf("TOTAL=%d\n", len(ids))
for j, id := range ids {
// fmt.Printf("[%v][%d]==[%v]\n", c.nodeIDs[i], j, id.String())
assert.True(t, bytes.Equal(teststorj.NodeIDFromString(c.nodeIDs[i][j]).Bytes(), id.Bytes()))
}
}
if c.testID == "8O" {
n := rt.replacementCache["8O"]
assert.Equal(t, "8O", n[0].Id)
nodeID80 := teststorj.NodeIDFromString("8O")
n := rt.replacementCache[keyToBucketID(nodeID80.Bytes())]
assert.Equal(t, nodeID80.Bytes(), n[0].Id.Bytes())
}
})
@ -222,15 +222,15 @@ func TestAddNode(t *testing.T) {
}
func TestUpdateNode(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
node := mockNode("BB")
node := teststorj.MockNode("BB")
ok, err := rt.addNode(node)
assert.True(t, ok)
assert.NoError(t, err)
val, err := rt.nodeBucketDB.Get(storage.Key(node.Id))
val, err := rt.nodeBucketDB.Get(node.Id.Bytes())
assert.NoError(t, err)
unmarshaled, err := unmarshalNodes(storage.Keys{storage.Key(node.Id)}, []storage.Value{val})
unmarshaled, err := unmarshalNodes([]storage.Value{val})
assert.NoError(t, err)
x := unmarshaled[0].Address
assert.Nil(t, x)
@ -238,62 +238,62 @@ func TestUpdateNode(t *testing.T) {
node.Address = &pb.NodeAddress{Address: "BB"}
err = rt.updateNode(node)
assert.NoError(t, err)
val, err = rt.nodeBucketDB.Get(storage.Key(node.Id))
val, err = rt.nodeBucketDB.Get(node.Id.Bytes())
assert.NoError(t, err)
unmarshaled, err = unmarshalNodes(storage.Keys{storage.Key(node.Id)}, []storage.Value{val})
unmarshaled, err = unmarshalNodes([]storage.Value{val})
assert.NoError(t, err)
y := unmarshaled[0].Address.Address
assert.Equal(t, "BB", y)
}
func TestRemoveNode(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
kadBucketID := []byte{255, 255}
node := mockNode("BB")
kadBucketID := rt.createFirstBucketID()
node := teststorj.MockNode("BB")
ok, err := rt.addNode(node)
assert.True(t, ok)
assert.NoError(t, err)
val, err := rt.nodeBucketDB.Get(storage.Key(node.Id))
val, err := rt.nodeBucketDB.Get(node.Id.Bytes())
assert.NoError(t, err)
assert.NotNil(t, val)
node2 := mockNode("CC")
node2 := teststorj.MockNode("CC")
rt.addToReplacementCache(kadBucketID, node2)
err = rt.removeNode(kadBucketID, storage.Key(node.Id))
err = rt.removeNode(node.Id)
assert.NoError(t, err)
val, err = rt.nodeBucketDB.Get(storage.Key(node.Id))
val, err = rt.nodeBucketDB.Get(node.Id.Bytes())
assert.Nil(t, val)
assert.Error(t, err)
val2, err := rt.nodeBucketDB.Get(storage.Key(node2.Id))
val2, err := rt.nodeBucketDB.Get(node2.Id.Bytes())
assert.NoError(t, err)
assert.NotNil(t, val2)
assert.Equal(t, 0, len(rt.replacementCache[string(kadBucketID)]))
assert.Equal(t, 0, len(rt.replacementCache[kadBucketID]))
//try to remove node not in rt
err = rt.removeNode(kadBucketID, storage.Key("DD"))
err = rt.removeNode(teststorj.NodeIDFromString("DD"))
assert.NoError(t, err)
}
func TestCreateOrUpdateKBucket(t *testing.T) {
id := []byte{255, 255}
rt, cleanup := createRoutingTable(t, nil)
id := teststorj.NodeIDFromBytes([]byte{255, 255})
rt, cleanup := createRoutingTable(t, storj.NodeID{})
defer cleanup()
err := rt.createOrUpdateKBucket(storage.Key(id), time.Now())
err := rt.createOrUpdateKBucket(keyToBucketID(id.Bytes()), time.Now())
assert.NoError(t, err)
val, e := rt.kadBucketDB.Get(storage.Key(id))
val, e := rt.kadBucketDB.Get(id.Bytes())
assert.NotNil(t, val)
assert.NoError(t, e)
}
func TestGetKBucketID(t *testing.T) {
kadIDA := storage.Key([]byte{255, 255})
nodeIDA := []byte("AA")
kadIDA := keyToBucketID([]byte{255, 255})
nodeIDA := teststorj.NodeIDFromString("AA")
rt, cleanup := createRoutingTable(t, nodeIDA)
defer cleanup()
keyA, err := rt.getKBucketID(nodeIDA)
assert.NoError(t, err)
assert.Equal(t, kadIDA, keyA)
assert.Equal(t, kadIDA[:2], keyA[:2])
}
func TestXorTwoIds(t *testing.T) {
@ -302,23 +302,23 @@ func TestXorTwoIds(t *testing.T) {
}
func TestSortByXOR(t *testing.T) {
node1 := []byte{127, 255} //xor 0
node1 := teststorj.NodeIDFromBytes([]byte{127, 255}) //xor 0
rt, cleanup := createRoutingTable(t, node1)
defer cleanup()
node2 := []byte{143, 255} //xor 240
assert.NoError(t, rt.nodeBucketDB.Put(node2, []byte("")))
node3 := []byte{255, 255} //xor 128
assert.NoError(t, rt.nodeBucketDB.Put(node3, []byte("")))
node4 := []byte{191, 255} //xor 192
assert.NoError(t, rt.nodeBucketDB.Put(node4, []byte("")))
node5 := []byte{133, 255} //xor 250
assert.NoError(t, rt.nodeBucketDB.Put(node5, []byte("")))
node2 := teststorj.NodeIDFromBytes([]byte{143, 255}) //xor 240
assert.NoError(t, rt.nodeBucketDB.Put(node2.Bytes(), []byte("")))
node3 := teststorj.NodeIDFromBytes([]byte{255, 255}) //xor 128
assert.NoError(t, rt.nodeBucketDB.Put(node3.Bytes(), []byte("")))
node4 := teststorj.NodeIDFromBytes([]byte{191, 255}) //xor 192
assert.NoError(t, rt.nodeBucketDB.Put(node4.Bytes(), []byte("")))
node5 := teststorj.NodeIDFromBytes([]byte{133, 255}) //xor 250
assert.NoError(t, rt.nodeBucketDB.Put(node5.Bytes(), []byte("")))
nodes, err := rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
expectedNodes := storage.Keys{node1, node5, node2, node4, node3}
expectedNodes := storage.Keys{node1.Bytes(), node5.Bytes(), node2.Bytes(), node4.Bytes(), node3.Bytes()}
assert.Equal(t, expectedNodes, nodes)
sortByXOR(nodes, node1)
expectedSorted := storage.Keys{node1, node3, node4, node2, node5}
sortByXOR(nodes, node1.Bytes())
expectedSorted := storage.Keys{node1.Bytes(), node3.Bytes(), node4.Bytes(), node2.Bytes(), node5.Bytes()}
assert.Equal(t, expectedSorted, nodes)
nodes, err = rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
@ -349,7 +349,7 @@ func BenchmarkSortByXOR(b *testing.B) {
}
func TestDetermineFurthestIDWithinK(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte{127, 255})
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromBytes([]byte{127, 255}))
defer cleanup()
cases := []struct {
testID string
@ -379,18 +379,19 @@ func TestDetermineFurthestIDWithinK(t *testing.T) {
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
assert.NoError(t, rt.nodeBucketDB.Put(c.nodeID, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(teststorj.NodeIDFromBytes(c.nodeID).Bytes(), []byte("")))
nodes, err := rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
furthest, err := rt.determineFurthestIDWithinK(nodes)
furthest, err := rt.determineFurthestIDWithinK(teststorj.NodeIDsFromBytes(nodes.ByteSlices()...))
assert.NoError(t, err)
assert.Equal(t, c.expectedFurthest, furthest)
fmt.Println(furthest.Bytes())
assert.Equal(t, c.expectedFurthest, furthest[:2])
})
}
}
func TestNodeIsWithinNearestK(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte{127, 255})
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromBytes([]byte{127, 255}))
defer cleanup()
rt.bucketSize = 2
cases := []struct {
@ -421,20 +422,22 @@ func TestNodeIsWithinNearestK(t *testing.T) {
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
result, err := rt.nodeIsWithinNearestK(c.nodeID)
result, err := rt.nodeIsWithinNearestK(teststorj.NodeIDFromBytes(c.nodeID))
assert.NoError(t, err)
assert.Equal(t, c.closest, result)
assert.NoError(t, rt.nodeBucketDB.Put(c.nodeID, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(teststorj.NodeIDFromBytes(c.nodeID).Bytes(), []byte("")))
})
}
}
func TestKadBucketContainsLocalNode(t *testing.T) {
nodeIDA := []byte{183, 255} //[10110111, 1111111]
nodeIDA := teststorj.NodeIDFromBytes([]byte{183, 255}) //[10110111, 1111111]
rt, cleanup := createRoutingTable(t, nodeIDA)
defer cleanup()
kadIDA := storage.Key([]byte{255, 255})
kadIDB := storage.Key([]byte{127, 255})
kadIDA := rt.createFirstBucketID()
var kadIDB bucketID
copy(kadIDB[:], kadIDA[:])
kadIDB[0] = 127
now := time.Now()
err := rt.createOrUpdateKBucket(kadIDB, now)
assert.NoError(t, err)
@ -447,132 +450,130 @@ func TestKadBucketContainsLocalNode(t *testing.T) {
}
func TestKadBucketHasRoom(t *testing.T) {
node1 := []byte{255, 255}
kadIDA := storage.Key([]byte{255, 255})
node1 := teststorj.NodeIDFromBytes([]byte{255, 255})
rt, cleanup := createRoutingTable(t, node1)
defer cleanup()
node2 := []byte{191, 255}
node3 := []byte{127, 255}
node4 := []byte{63, 255}
node5 := []byte{159, 255}
node6 := []byte{0, 127}
kadIDA := rt.createFirstBucketID()
node2 := teststorj.NodeIDFromBytes([]byte{191, 255})
node3 := teststorj.NodeIDFromBytes([]byte{127, 255})
node4 := teststorj.NodeIDFromBytes([]byte{63, 255})
node5 := teststorj.NodeIDFromBytes([]byte{159, 255})
node6 := teststorj.NodeIDFromBytes([]byte{0, 127})
resultA, err := rt.kadBucketHasRoom(kadIDA)
assert.NoError(t, err)
assert.True(t, resultA)
assert.NoError(t, rt.nodeBucketDB.Put(node2, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node3, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node4, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node5, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node6, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node2.Bytes(), []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node3.Bytes(), []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node4.Bytes(), []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node5.Bytes(), []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(node6.Bytes(), []byte("")))
resultB, err := rt.kadBucketHasRoom(kadIDA)
assert.NoError(t, err)
assert.False(t, resultB)
}
func TestGetNodeIDsWithinKBucket(t *testing.T) {
nodeIDA := []byte{183, 255} //[10110111, 1111111]
nodeIDA := teststorj.NodeIDFromBytes([]byte{183, 255}) //[10110111, 1111111]
rt, cleanup := createRoutingTable(t, nodeIDA)
defer cleanup()
kadIDA := storage.Key([]byte{255, 255})
kadIDB := storage.Key([]byte{127, 255})
kadIDA := rt.createFirstBucketID()
var kadIDB bucketID
copy(kadIDB[:], kadIDA[:])
kadIDB[0] = 127
now := time.Now()
assert.NoError(t, rt.createOrUpdateKBucket(kadIDB, now))
nodeIDB := []byte{111, 255} //[01101111, 1111111]
nodeIDC := []byte{47, 255} //[00101111, 1111111]
nodeIDB := teststorj.NodeIDFromBytes([]byte{111, 255}) //[01101111, 1111111]
nodeIDC := teststorj.NodeIDFromBytes([]byte{47, 255}) //[00101111, 1111111]
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDB, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDC, []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDB.Bytes(), []byte("")))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDC.Bytes(), []byte("")))
cases := []struct {
testID string
kadID []byte
kadID bucketID
expected storage.Keys
}{
{testID: "A",
kadID: kadIDA,
expected: storage.Keys{nodeIDA},
expected: storage.Keys{nodeIDA.Bytes()},
},
{testID: "B",
kadID: kadIDB,
expected: storage.Keys{nodeIDC, nodeIDB},
expected: storage.Keys{nodeIDC.Bytes(), nodeIDB.Bytes()},
},
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
n, err := rt.getNodeIDsWithinKBucket(c.kadID)
assert.NoError(t, err)
assert.Equal(t, c.expected, n)
for i, id := range c.expected {
assert.True(t, id.Equal(n[i].Bytes()))
}
})
}
}
func TestGetNodesFromIDs(t *testing.T) {
nodeA := mockNode("AA")
nodeB := mockNode("BB")
nodeC := mockNode("CC")
nodeIDA := []byte(nodeA.Id)
nodeIDB := []byte(nodeB.Id)
nodeIDC := []byte(nodeC.Id)
nodeA := teststorj.MockNode("AA")
nodeB := teststorj.MockNode("BB")
nodeC := teststorj.MockNode("CC")
a, err := proto.Marshal(nodeA)
assert.NoError(t, err)
b, err := proto.Marshal(nodeB)
assert.NoError(t, err)
c, err := proto.Marshal(nodeC)
assert.NoError(t, err)
rt, cleanup := createRoutingTable(t, nodeIDA)
rt, cleanup := createRoutingTable(t, nodeA.Id)
defer cleanup()
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDA, a))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDB, b))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDC, c))
expected := []storage.Value{a, b, c}
assert.NoError(t, rt.nodeBucketDB.Put(nodeA.Id.Bytes(), a))
assert.NoError(t, rt.nodeBucketDB.Put(nodeB.Id.Bytes(), b))
assert.NoError(t, rt.nodeBucketDB.Put(nodeC.Id.Bytes(), c))
expected := []*pb.Node{nodeA, nodeB, nodeC}
nodeIDs, err := rt.nodeBucketDB.List(nil, 0)
nodeKeys, err := rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
_, values, err := rt.getNodesFromIDs(nodeIDs)
values, err := rt.getNodesFromIDsBytes(teststorj.NodeIDsFromBytes(nodeKeys.ByteSlices()...))
assert.NoError(t, err)
assert.Equal(t, expected, values)
for i, n := range expected {
assert.True(t, bytes.Equal(n.Id.Bytes(), values[i].Id.Bytes()))
}
}
func TestUnmarshalNodes(t *testing.T) {
nodeA := mockNode("AA")
nodeB := mockNode("BB")
nodeC := mockNode("CC")
nodeA := teststorj.MockNode("AA")
nodeB := teststorj.MockNode("BB")
nodeC := teststorj.MockNode("CC")
nodeIDA := []byte(nodeA.Id)
nodeIDB := []byte(nodeB.Id)
nodeIDC := []byte(nodeC.Id)
a, err := proto.Marshal(nodeA)
assert.NoError(t, err)
b, err := proto.Marshal(nodeB)
assert.NoError(t, err)
c, err := proto.Marshal(nodeC)
assert.NoError(t, err)
rt, cleanup := createRoutingTable(t, nodeIDA)
rt, cleanup := createRoutingTable(t, nodeA.Id)
defer cleanup()
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDA, a))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDB, b))
assert.NoError(t, rt.nodeBucketDB.Put(nodeIDC, c))
nodeIDs, err := rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, rt.nodeBucketDB.Put(nodeA.Id.Bytes(), a))
assert.NoError(t, rt.nodeBucketDB.Put(nodeB.Id.Bytes(), b))
assert.NoError(t, rt.nodeBucketDB.Put(nodeC.Id.Bytes(), c))
nodeKeys, err := rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
ids, values, err := rt.getNodesFromIDs(nodeIDs)
assert.NoError(t, err)
nodes, err := unmarshalNodes(ids, values)
nodes, err := rt.getNodesFromIDsBytes(teststorj.NodeIDsFromBytes(nodeKeys.ByteSlices()...))
assert.NoError(t, err)
expected := []*pb.Node{nodeA, nodeB, nodeC}
for i, v := range expected {
assert.True(t, proto.Equal(v, nodes[i]))
assert.True(t, bytes.Equal(v.Id.Bytes(), nodes[i].Id.Bytes()))
}
}
func TestGetUnmarshaledNodesFromBucket(t *testing.T) {
bucketID := []byte{255, 255}
nodeA := mockNode("AA")
rt, cleanup := createRoutingTable(t, []byte(nodeA.Id))
nodeA := teststorj.MockNode("AA")
rt, cleanup := createRoutingTable(t, nodeA.Id)
bucketID := rt.createFirstBucketID()
defer cleanup()
nodeB := mockNode("BB")
nodeC := mockNode("CC")
nodeB := teststorj.MockNode("BB")
nodeC := teststorj.MockNode("CC")
var err error
_, err = rt.addNode(nodeB)
assert.NoError(t, err)
@ -582,71 +583,74 @@ func TestGetUnmarshaledNodesFromBucket(t *testing.T) {
expected := []*pb.Node{nodeA, nodeB, nodeC}
assert.NoError(t, err)
for i, v := range expected {
assert.True(t, proto.Equal(v, nodes[i]))
assert.True(t, bytes.Equal(v.Id.Bytes(), nodes[i].Id.Bytes()))
}
}
func TestGetKBucketRange(t *testing.T) {
rt, cleanup := createRoutingTable(t, nil)
rt, cleanup := createRoutingTable(t, storj.NodeID{})
defer cleanup()
idA := []byte{255, 255}
idB := []byte{127, 255}
idC := []byte{63, 255}
assert.NoError(t, rt.kadBucketDB.Put(idA, []byte("")))
assert.NoError(t, rt.kadBucketDB.Put(idB, []byte("")))
assert.NoError(t, rt.kadBucketDB.Put(idC, []byte("")))
idA := teststorj.NodeIDFromBytes([]byte{255, 255})
idB := teststorj.NodeIDFromBytes([]byte{127, 255})
idC := teststorj.NodeIDFromBytes([]byte{63, 255})
assert.NoError(t, rt.kadBucketDB.Put(idA.Bytes(), []byte("")))
assert.NoError(t, rt.kadBucketDB.Put(idB.Bytes(), []byte("")))
assert.NoError(t, rt.kadBucketDB.Put(idC.Bytes(), []byte("")))
zeroBID := bucketID{}
cases := []struct {
testID string
id []byte
id storj.NodeID
expected storage.Keys
}{
{testID: "A",
id: idA,
expected: storage.Keys{idB, idA},
expected: storage.Keys{idB.Bytes(), idA.Bytes()},
},
{testID: "B",
id: idB,
expected: storage.Keys{idC, idB}},
expected: storage.Keys{idC.Bytes(), idB.Bytes()}},
{testID: "C",
id: idC,
expected: storage.Keys{rt.createZeroAsStorageKey(), idC},
expected: storage.Keys{zeroBID[:], idC.Bytes()},
},
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
ep, err := rt.getKBucketRange(c.id)
ep, err := rt.getKBucketRange(keyToBucketID(c.id.Bytes()))
assert.NoError(t, err)
assert.Equal(t, c.expected, ep)
for i, k := range c.expected {
assert.True(t, k.Equal(ep[i][:]))
}
})
}
}
func TestCreateFirstBucketID(t *testing.T) {
rt, cleanup := createRoutingTable(t, nil)
rt, cleanup := createRoutingTable(t, storj.NodeID{})
defer cleanup()
x := rt.createFirstBucketID()
expected := []byte{255, 255}
assert.Equal(t, x, expected)
expected := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
assert.Equal(t, x[:], expected)
}
func TestCreateZeroAsStorageKey(t *testing.T) {
rt, cleanup := createRoutingTable(t, nil)
defer cleanup()
zero := rt.createZeroAsStorageKey()
expected := []byte{0, 0}
assert.Equal(t, zero, storage.Key(expected))
func TestBucketIDZeroValue(t *testing.T) {
// rt, cleanup := createRoutingTable(t, storj.NodeID{})
// defer cleanup()
zero := bucketID{} //rt.createZeroAsBucketID()
expected := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
assert.True(t, bytes.Equal(zero[:], expected))
}
func TestDetermineLeafDepth(t *testing.T) {
rt, cleanup := createRoutingTable(t, nil)
rt, cleanup := createRoutingTable(t, storj.NodeID{})
defer cleanup()
idA := []byte{255, 255}
idB := []byte{127, 255}
idC := []byte{63, 255}
idA := teststorj.NodeIDFromBytes([]byte{255, 255})
idB := teststorj.NodeIDFromBytes([]byte{127, 255})
idC := teststorj.NodeIDFromBytes([]byte{63, 255})
cases := []struct {
testID string
id []byte
id storj.NodeID
depth int
addNode func()
}{
@ -654,7 +658,7 @@ func TestDetermineLeafDepth(t *testing.T) {
id: idA,
depth: 0,
addNode: func() {
e := rt.kadBucketDB.Put(idA, []byte(""))
e := rt.kadBucketDB.Put(idA.Bytes(), []byte(""))
assert.NoError(t, e)
},
},
@ -662,7 +666,7 @@ func TestDetermineLeafDepth(t *testing.T) {
id: idB,
depth: 1,
addNode: func() {
e := rt.kadBucketDB.Put(idB, []byte(""))
e := rt.kadBucketDB.Put(idB.Bytes(), []byte(""))
assert.NoError(t, e)
},
},
@ -670,7 +674,7 @@ func TestDetermineLeafDepth(t *testing.T) {
id: idA,
depth: 1,
addNode: func() {
e := rt.kadBucketDB.Put(idC, []byte(""))
e := rt.kadBucketDB.Put(idC.Bytes(), []byte(""))
assert.NoError(t, e)
},
},
@ -688,92 +692,103 @@ func TestDetermineLeafDepth(t *testing.T) {
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
c.addNode()
d, err := rt.determineLeafDepth(c.id)
d, err := rt.determineLeafDepth(keyToBucketID(c.id.Bytes()))
assert.NoError(t, err)
assert.Equal(t, c.depth, d)
})
}
}
func padBucketID(b []byte, p byte) (bID bucketID) {
for i := range bID {
if len(b) > i {
bID[i] = b[i]
continue
}
bID[i] = p
}
return bID
}
func TestDetermineDifferingBitIndex(t *testing.T) {
rt, cleanup := createRoutingTable(t, nil)
rt, cleanup := createRoutingTable(t, storj.NodeID{})
defer cleanup()
cases := []struct {
testID string
bucketID []byte
key []byte
bucketID bucketID
key bucketID
expected int
err *errs.Class
}{
{testID: "A",
bucketID: []byte{191, 255},
key: []byte{255, 255},
bucketID: padBucketID([]byte{191, 255}, 255),
key: padBucketID([]byte{255, 255}, 255),
expected: 1,
err: nil,
},
{testID: "B",
bucketID: []byte{255, 255},
key: []byte{191, 255},
bucketID: padBucketID([]byte{255, 255}, 255),
key: padBucketID([]byte{191, 255}, 255),
expected: 1,
err: nil,
},
{testID: "C",
bucketID: []byte{95, 255},
key: []byte{127, 255},
bucketID: padBucketID([]byte{95, 255}, 255),
key: padBucketID([]byte{127, 255}, 255),
expected: 2,
err: nil,
},
{testID: "D",
bucketID: []byte{95, 255},
key: []byte{79, 255},
bucketID: padBucketID([]byte{95, 255}, 255),
key: padBucketID([]byte{79, 255}, 255),
expected: 3,
err: nil,
},
{testID: "E",
bucketID: []byte{95, 255},
key: []byte{63, 255},
bucketID: padBucketID([]byte{95, 255}, 255),
key: padBucketID([]byte{63, 255}, 255),
expected: 2,
err: nil,
},
{testID: "F",
bucketID: []byte{95, 255},
key: []byte{79, 255},
bucketID: padBucketID([]byte{95, 255}, 255),
key: padBucketID([]byte{79, 255}, 255),
expected: 3,
err: nil,
},
{testID: "G",
bucketID: []byte{255, 255},
key: []byte{255, 255},
bucketID: padBucketID([]byte{255, 255}, 255),
key: padBucketID([]byte{255, 255}, 255),
expected: -2,
err: &RoutingErr,
},
{testID: "H",
bucketID: []byte{255, 255},
key: []byte{0, 0},
bucketID: padBucketID([]byte{255, 255}, 255),
key: padBucketID([]byte{0, 0}, 0),
expected: -1,
err: nil,
},
{testID: "I",
bucketID: []byte{127, 255},
key: []byte{0, 0},
bucketID: padBucketID([]byte{127, 255}, 255),
key: padBucketID([]byte{0, 0}, 0),
expected: 0,
err: nil,
},
{testID: "J",
bucketID: []byte{63, 255},
key: []byte{0, 0},
bucketID: padBucketID([]byte{63, 255}, 255),
key: padBucketID([]byte{0, 0}, 0),
expected: 1,
err: nil,
},
{testID: "K",
bucketID: []byte{31, 255},
key: []byte{0, 0},
bucketID: padBucketID([]byte{31, 255}, 255),
key: padBucketID([]byte{0, 0}, 0),
expected: 2,
err: nil,
},
{testID: "L",
bucketID: []byte{95, 255},
key: []byte{63, 255},
bucketID: padBucketID([]byte{95, 255}, 255),
key: padBucketID([]byte{63, 255}, 255),
expected: 2,
err: nil,
},
@ -789,7 +804,7 @@ func TestDetermineDifferingBitIndex(t *testing.T) {
}
func TestSplitBucket(t *testing.T) {
rt, cleanup := createRoutingTable(t, nil)
rt, cleanup := createRoutingTable(t, storj.NodeID{})
defer cleanup()
cases := []struct {
testID string
@ -830,8 +845,8 @@ func TestSplitBucket(t *testing.T) {
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
newID := rt.splitBucket(c.idA, c.depth)
assert.Equal(t, c.idB, newID)
newID := rt.splitBucket(keyToBucketID(c.idA), c.depth)
assert.Equal(t, c.idB, newID[:2])
})
}
}

View File

@ -4,25 +4,27 @@
package kademlia
import (
"bytes"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/node"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
)
func TestLocal(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
assert.Equal(t, rt.Local().Id, "AA")
assert.Equal(t, rt.Local().Id.Bytes()[:2], []byte("AA"))
}
func TestK(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
k := rt.K()
assert.Equal(t, rt.bucketSize, k)
@ -30,7 +32,7 @@ func TestK(t *testing.T) {
}
func TestCacheSize(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
expected := rt.rcBucketSize
result := rt.CacheSize()
@ -38,16 +40,16 @@ func TestCacheSize(t *testing.T) {
}
func TestGetBucket(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
node := mockNode("AA")
node2 := mockNode("BB")
node := teststorj.MockNode("AA")
node2 := teststorj.MockNode("BB")
ok, err := rt.addNode(node2)
assert.True(t, ok)
assert.NoError(t, err)
cases := []struct {
nodeID string
nodeID storj.NodeID
expected *KBucket
ok bool
}{
@ -63,7 +65,7 @@ func TestGetBucket(t *testing.T) {
for i, v := range cases {
b, e := rt.GetBucket(node2.Id)
for j, w := range v.expected.nodes {
if !assert.True(t, proto.Equal(w, b.Nodes()[j])) {
if !assert.True(t, bytes.Equal(w.Id.Bytes(), b.Nodes()[j].Id.Bytes())) {
t.Logf("case %v failed expected: ", i)
}
}
@ -74,10 +76,10 @@ func TestGetBucket(t *testing.T) {
}
func TestGetBuckets(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
node := mockNode("AA")
node2 := mockNode("BB")
node := teststorj.MockNode("AA")
node2 := teststorj.MockNode("BB")
ok, err := rt.addNode(node2)
assert.True(t, ok)
assert.NoError(t, err)
@ -86,17 +88,17 @@ func TestGetBuckets(t *testing.T) {
assert.NoError(t, err)
for _, v := range buckets {
for j, w := range v.Nodes() {
assert.True(t, proto.Equal(expected[j], w))
assert.True(t, bytes.Equal(expected[j].Id.Bytes(), w.Id.Bytes()))
}
}
}
func TestFindNear(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
rt, cleanup := createRoutingTable(t, teststorj.NodeIDFromString("AA"))
defer cleanup()
node1 := mockNode("AA")
node2 := mockNode("BB")
node3 := mockNode("CC")
node1 := teststorj.MockNode("AA")
node2 := teststorj.MockNode("BB")
node3 := teststorj.MockNode("CC")
ok, err := rt.addNode(node2)
assert.True(t, ok)
assert.NoError(t, err)
@ -130,18 +132,20 @@ func TestFindNear(t *testing.T) {
}
for _, c := range cases {
t.Run(c.testID, func(t *testing.T) {
ns, err := rt.FindNear(node.IDFromString(c.node.Id), c.limit)
ns, err := rt.FindNear(c.node.Id, c.limit)
assert.NoError(t, err)
assert.Equal(t, c.expectedNodes, ns)
for i, n := range c.expectedNodes {
assert.True(t, bytes.Equal(n.Id.Bytes(), ns[i].Id.Bytes()))
}
})
}
}
func TestConnectionSuccess(t *testing.T) {
id := "AA"
rt, cleanup := createRoutingTable(t, []byte(id))
id := teststorj.NodeIDFromString("AA")
rt, cleanup := createRoutingTable(t, id)
defer cleanup()
id2 := "BB"
id2 := teststorj.NodeIDFromString("BB")
address1 := &pb.NodeAddress{Address: "a"}
address2 := &pb.NodeAddress{Address: "b"}
node1 := &pb.Node{Id: id, Address: address1}
@ -149,7 +153,7 @@ func TestConnectionSuccess(t *testing.T) {
cases := []struct {
testID string
node *pb.Node
id string
id storj.NodeID
address *pb.NodeAddress
}{
{testID: "Update Node",
@ -167,9 +171,9 @@ func TestConnectionSuccess(t *testing.T) {
t.Run(c.testID, func(t *testing.T) {
err := rt.ConnectionSuccess(c.node)
assert.NoError(t, err)
v, err := rt.nodeBucketDB.Get([]byte(c.id))
v, err := rt.nodeBucketDB.Get(c.id.Bytes())
assert.NoError(t, err)
n, err := unmarshalNodes(storage.Keys{storage.Key(c.id)}, []storage.Value{v})
n, err := unmarshalNodes([]storage.Value{v})
assert.NoError(t, err)
assert.Equal(t, c.address.Address, n[0].Address.Address)
})
@ -177,45 +181,44 @@ func TestConnectionSuccess(t *testing.T) {
}
func TestConnectionFailed(t *testing.T) {
id := "AA"
node := mockNode(id)
rt, cleanup := createRoutingTable(t, []byte(id))
id := teststorj.NodeIDFromString("AA")
node := &pb.Node{Id: id}
rt, cleanup := createRoutingTable(t, id)
defer cleanup()
err := rt.ConnectionFailed(node)
assert.NoError(t, err)
v, err := rt.nodeBucketDB.Get([]byte(id))
v, err := rt.nodeBucketDB.Get(id.Bytes())
assert.Error(t, err)
assert.Nil(t, v)
}
func TestSetBucketTimestamp(t *testing.T) {
id := []byte("AA")
idStr := string(id)
id := teststorj.NodeIDFromString("AA")
rt, cleanup := createRoutingTable(t, id)
defer cleanup()
now := time.Now().UTC()
err := rt.createOrUpdateKBucket(id, now)
err := rt.createOrUpdateKBucket(keyToBucketID(id.Bytes()), now)
assert.NoError(t, err)
ti, err := rt.GetBucketTimestamp(idStr, nil)
ti, err := rt.GetBucketTimestamp(id.Bytes(), nil)
assert.Equal(t, now, ti)
assert.NoError(t, err)
now = time.Now().UTC()
err = rt.SetBucketTimestamp(idStr, now)
err = rt.SetBucketTimestamp(id.Bytes(), now)
assert.NoError(t, err)
ti, err = rt.GetBucketTimestamp(idStr, nil)
ti, err = rt.GetBucketTimestamp(id.Bytes(), nil)
assert.Equal(t, now, ti)
assert.NoError(t, err)
}
func TestGetBucketTimestamp(t *testing.T) {
rt, cleanup := createRoutingTable(t, []byte("AA"))
id := teststorj.NodeIDFromString("AA")
rt, cleanup := createRoutingTable(t, id)
defer cleanup()
now := time.Now().UTC()
id := "AA"
err := rt.createOrUpdateKBucket([]byte(id), now)
err := rt.createOrUpdateKBucket(keyToBucketID(id.Bytes()), now)
assert.NoError(t, err)
ti, err := rt.GetBucketTimestamp(id, nil)
ti, err := rt.GetBucketTimestamp(id.Bytes(), nil)
assert.Equal(t, now, ti)
assert.NoError(t, err)
}

View File

@ -8,6 +8,7 @@ import (
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// NewMockKademlia returns a newly intialized MockKademlia struct
@ -23,7 +24,7 @@ type MockKademlia struct {
// GetNodes increments the GetNodesCalled field on MockKademlia
// returns the Nodes field on MockKademlia
func (k *MockKademlia) GetNodes(ctx context.Context, start string, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
func (k *MockKademlia) GetNodes(ctx context.Context, start storj.NodeID, limit int, restrictions ...pb.Restriction) ([]*pb.Node, error) {
return k.Nodes, nil
}
@ -47,7 +48,7 @@ func (k *MockKademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error)
// FindNode increments the FindNodeCalled field on MockKademlia
//
// returns the local kademlia node
func (k *MockKademlia) FindNode(ctx context.Context, ID dht.NodeID) (pb.Node, error) {
func (k *MockKademlia) FindNode(ctx context.Context, id storj.NodeID) (pb.Node, error) {
return k.RoutingTable.Local(), nil
}

View File

@ -91,7 +91,9 @@ func TestGetObjectStream(t *testing.T) {
return
}
fmt.Println("BEFORE")
_, err = store.Put(ctx, "large-file", bytes.NewReader(data), objects.SerializableMeta{}, exp)
fmt.Println("AFTER")
if !assert.NoError(t, err) {
return
}
@ -182,7 +184,7 @@ func assertRemoteSegment(t *testing.T, segment storj.Segment) {
}
nums[piece.Number] = struct{}{}
id := piece.Location.HexString()
id := piece.Location.String()
if _, ok := nodes[id]; ok {
t.Fatalf("node id %s is not unique", id)
}

View File

@ -10,7 +10,6 @@ import (
"github.com/gogo/protobuf/proto"
"storj.io/storj/pkg/encryption"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
@ -93,7 +92,7 @@ func (stream *readonlyStream) segment(ctx context.Context, index int64) (segment
segment.Pieces = make([]storj.Piece, 0, len(pointer.Remote.RemotePieces))
for _, piece := range pointer.Remote.RemotePieces {
var nodeID storj.NodeID
copy(nodeID[:], node.IDFromString(piece.NodeId).Bytes())
copy(nodeID[:], piece.NodeId.Bytes())
segment.Pieces = append(segment.Pieces, storj.Piece{Number: byte(piece.PieceNum), Location: nodeID})
}
}

View File

@ -5,8 +5,9 @@
package logging
import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockErrorLogger is a mock of ErrorLogger interface

View File

@ -7,6 +7,7 @@ import (
"context"
"github.com/zeebo/errs"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"

View File

@ -10,6 +10,8 @@ import (
"github.com/zeebo/errs"
"google.golang.org/grpc"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/transport"
@ -23,7 +25,7 @@ var Error = errs.Class("connection pool error")
type ConnectionPool struct {
tc transport.Client
mu sync.RWMutex
items map[string]*Conn
items map[storj.NodeID]*Conn
}
// Conn is the connection that is stored in the connection pool
@ -43,18 +45,18 @@ func NewConn(addr string) *Conn { return &Conn{addr: addr} }
func NewConnectionPool(identity *provider.FullIdentity) *ConnectionPool {
return &ConnectionPool{
tc: transport.NewClient(identity),
items: make(map[string]*Conn),
items: make(map[storj.NodeID]*Conn),
mu: sync.RWMutex{},
}
}
// Get retrieves a node connection with the provided nodeID
// nil is returned if the NodeID is not in the connection pool
func (pool *ConnectionPool) Get(key string) (interface{}, error) {
func (pool *ConnectionPool) Get(id storj.NodeID) (interface{}, error) {
pool.mu.Lock()
defer pool.mu.Unlock()
i, ok := pool.items[key]
i, ok := pool.items[id]
if !ok {
return nil, nil
}
@ -63,28 +65,28 @@ func (pool *ConnectionPool) Get(key string) (interface{}, error) {
}
// Disconnect deletes a connection associated with the provided NodeID
func (pool *ConnectionPool) Disconnect(key string) error {
func (pool *ConnectionPool) Disconnect(id storj.NodeID) error {
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.disconnect(key)
return pool.disconnect(id)
}
func (pool *ConnectionPool) disconnect(key string) error {
i, ok := pool.items[key]
func (pool *ConnectionPool) disconnect(id storj.NodeID) error {
i, ok := pool.items[id]
if !ok || i.grpc == nil {
return nil
}
delete(pool.items, key)
delete(pool.items, id)
return i.grpc.Close()
}
// Dial connects to the node with the given ID and Address returning a gRPC Node Client
func (pool *ConnectionPool) Dial(ctx context.Context, n *pb.Node) (pb.NodesClient, error) {
id := n.GetId()
id := n.Id
pool.mu.Lock()
conn, ok := pool.items[id]
if !ok {
@ -127,6 +129,6 @@ func (pool *ConnectionPool) DisconnectAll() error {
// Init initializes the cache
func (pool *ConnectionPool) Init() {
pool.mu.Lock()
pool.items = make(map[string]*Conn)
pool.items = make(map[storj.NodeID]*Conn)
pool.mu.Unlock()
}

View File

@ -10,13 +10,18 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
var fooID = teststorj.NodeIDFromString("foo")
func TestGet(t *testing.T) {
cases := []struct {
pool *ConnectionPool
key string
nodeID storj.NodeID
expected Conn
expectedError error
}{
@ -24,10 +29,10 @@ func TestGet(t *testing.T) {
pool: func() *ConnectionPool {
p := NewConnectionPool(newTestIdentity(t))
p.Init()
p.items["foo"] = &Conn{addr: "foo"}
p.items[fooID] = &Conn{addr: "foo"}
return p
}(),
key: "foo",
nodeID: fooID,
expected: Conn{addr: "foo"},
expectedError: nil,
},
@ -35,7 +40,7 @@ func TestGet(t *testing.T) {
for i := range cases {
v := &cases[i]
test, err := v.pool.Get(v.key)
test, err := v.pool.Get(v.nodeID)
assert.Equal(t, v.expectedError, err)
assert.Equal(t, v.expected.addr, test.(*Conn).addr)
@ -49,16 +54,16 @@ func TestDisconnect(t *testing.T) {
// gc.Close = func() error { return nil }
cases := []struct {
pool ConnectionPool
key string
nodeID storj.NodeID
expected interface{}
expectedError error
}{
{
pool: ConnectionPool{
mu: sync.RWMutex{},
items: map[string]*Conn{"foo": &Conn{grpc: conn}},
items: map[storj.NodeID]*Conn{fooID: &Conn{grpc: conn}},
},
key: "foo",
nodeID: fooID,
expected: nil,
expectedError: nil,
},
@ -66,10 +71,10 @@ func TestDisconnect(t *testing.T) {
for i := range cases {
v := &cases[i]
err := v.pool.Disconnect(v.key)
err := v.pool.Disconnect(v.nodeID)
assert.Equal(t, v.expectedError, err)
test, err := v.pool.Get(v.key)
test, err := v.pool.Get(v.nodeID)
assert.Equal(t, v.expectedError, err)
assert.Equal(t, v.expected, test)
@ -86,7 +91,7 @@ func TestDial(t *testing.T) {
}{
{
pool: NewConnectionPool(newTestIdentity(t)),
node: &pb.Node{Id: "foo", Address: &pb.NodeAddress{Address: "127.0.0.1:0"}},
node: &pb.Node{Id: fooID, Address: &pb.NodeAddress{Address: "127.0.0.1:0"}},
expected: nil,
expectedError: nil,
},

View File

@ -1,45 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package node
import (
"context"
"storj.io/storj/pkg/provider"
)
// ID is the unique identifier of a Node in the overlay network.
// It wraps a plain string; construct one with IDFromString and convert
// back out with String or Bytes.
type ID string
// NewFullIdentity creates a new full identity for a node: it first builds
// a certificate authority using the given proof-of-work difficulty and
// worker concurrency, then derives a leaf identity from that CA.
func NewFullIdentity(ctx context.Context, difficulty uint16, concurrency uint) (*provider.FullIdentity, error) {
	opts := provider.NewCAOptions{
		Difficulty:  difficulty,
		Concurrency: concurrency,
	}
	ca, err := provider.NewCA(ctx, opts)
	if err != nil {
		return nil, err
	}
	fullIdentity, err := ca.NewIdentity()
	if err != nil {
		return nil, err
	}
	return fullIdentity, nil
}
// String returns the ID's underlying string representation.
func (n *ID) String() string {
	id := *n
	return string(id)
}
// Bytes returns the ID encoded as a byte slice.
func (n *ID) Bytes() []byte {
	id := *n
	return []byte(id)
}
// IDFromString wraps the given string as an ID and returns a pointer to it.
func IDFromString(s string) *ID {
	id := ID(s)
	return &id
}

View File

@ -1,32 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package node
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestString checks that String round-trips the value an ID was built from.
func TestString(t *testing.T) {
	const want = "test node"
	id := ID(want)
	assert.Equal(t, want, id.String())
}
// TestIDFromString checks that IDFromString yields an ID equivalent to a
// direct conversion of the same string.
func TestIDFromString(t *testing.T) {
	const raw = "test node"
	converted := ID(raw)
	parsed := IDFromString(raw)
	assert.Equal(t, parsed.String(), converted.String())
}
// TestNewFullIdentity verifies that generating a full identity with a
// difficulty of 12 and a concurrency of 4 succeeds without error.
// Note: it relies on the package-level ctx variable.
func TestNewFullIdentity(t *testing.T) {
	_, err := NewFullIdentity(ctx, 12, 4)
	assert.NoError(t, err)
}

View File

@ -12,13 +12,18 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/internal/identity"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/dht/mocks"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
var ctx = context.Background()
var (
ctx = context.Background()
helloID = teststorj.NodeIDFromString("hello")
)
func TestLookup(t *testing.T) {
ctx := testcontext.New(t)
@ -31,9 +36,9 @@ func TestLookup(t *testing.T) {
expectedErr error
}{
{
self: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":7070"}},
to: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":8080"}},
find: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":9090"}},
self: pb.Node{Id: helloID, Address: &pb.NodeAddress{Address: ":7070"}},
to: pb.Node{Id: helloID, Address: &pb.NodeAddress{Address: ":8080"}},
find: pb.Node{Id: helloID, Address: &pb.NodeAddress{Address: ":9090"}},
expectedErr: nil,
},
}
@ -43,7 +48,7 @@ func TestLookup(t *testing.T) {
assert.NoError(t, err)
id := newTestIdentity(t)
v.to = pb.Node{Id: id.ID.String(), Address: &pb.NodeAddress{Address: lis.Addr().String()}}
v.to = pb.Node{Id: id.ID, Address: &pb.NodeAddress{Address: lis.Addr().String()}}
srv, mock, err := newTestServer(ctx, &mockNodeServer{queryCalled: 0}, id)
assert.NoError(t, err)
@ -59,7 +64,7 @@ func TestLookup(t *testing.T) {
mdht.EXPECT().GetRoutingTable(gomock.Any()).Return(mrt, nil)
mrt.EXPECT().ConnectionSuccess(gomock.Any()).Return(nil)
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -80,13 +85,13 @@ func TestPing(t *testing.T) {
cases := []struct {
self pb.Node
toID string
toIdentity *provider.FullIdentity
ident *provider.FullIdentity
expectedErr error
}{
{
self: pb.Node{Id: "hello", Address: &pb.NodeAddress{Address: ":7070"}},
self: pb.Node{Id: helloID, Address: &pb.NodeAddress{Address: ":7070"}},
toID: "",
toIdentity: newTestIdentity(t),
ident: newTestIdentity(t),
expectedErr: nil,
},
}
@ -100,17 +105,16 @@ func TestPing(t *testing.T) {
// set up a node server
srv := NewServer(mdht)
msrv, _, err := newTestServer(ctx, srv, v.toIdentity)
msrv, _, err := newTestServer(ctx, srv, v.ident)
assert.NoError(t, err)
// start gRPC server
ctx.Go(func() error { return msrv.Serve(lis) })
defer msrv.Stop()
nc, err := NewNodeClient(v.toIdentity, v.self, mdht)
nc, err := NewNodeClient(v.ident, v.self, mdht)
assert.NoError(t, err)
id := ID(v.toIdentity.ID)
ok, err := nc.Ping(ctx, pb.Node{Id: id.String(), Address: &pb.NodeAddress{Address: lis.Addr().String()}})
ok, err := nc.Ping(ctx, pb.Node{Id: v.ident.ID, Address: &pb.NodeAddress{Address: lis.Addr().String()}})
assert.Equal(t, v.expectedErr, err)
assert.Equal(t, ok, true)
}
@ -145,7 +149,7 @@ func (mn *mockNodeServer) Ping(ctx context.Context, req *pb.PingRequest) (*pb.Pi
}
func newTestIdentity(t *testing.T) *provider.FullIdentity {
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)

View File

@ -44,7 +44,7 @@ func (s *Server) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResp
if err != nil {
s.logger.Error("could not respond to connection failed", zap.Error(err))
}
s.logger.Error("connection to node failed", zap.Error(err), zap.String("nodeID", req.Sender.Id))
s.logger.Error("connection to node failed", zap.Error(err), zap.String("nodeID", req.Sender.Id.String()))
}
err = rt.ConnectionSuccess(req.Sender)
@ -53,8 +53,7 @@ func (s *Server) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResp
}
}
id := IDFromString(req.Target.Id)
nodes, err := rt.FindNear(id, int(req.Limit))
nodes, err := rt.FindNear(req.Target.Id, int(req.Limit))
if err != nil {
return &pb.QueryResponse{}, NodeClientErr.New("could not find near %s", err)
}

View File

@ -11,6 +11,8 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/dht/mocks"
"storj.io/storj/pkg/pb"
@ -22,9 +24,9 @@ func TestQuery(t *testing.T) {
mockDHT := mock_dht.NewMockDHT(ctrl)
mockRT := mock_dht.NewMockRoutingTable(ctrl)
s := &Server{dht: mockDHT}
sender := &pb.Node{Id: "A"}
target := &pb.Node{Id: "B"}
node := &pb.Node{Id: "C"}
sender := &pb.Node{Id: teststorj.NodeIDFromString("A")}
target := &pb.Node{Id: teststorj.NodeIDFromString("B")}
node := &pb.Node{Id: teststorj.NodeIDFromString("C")}
cases := []struct {
caseName string
rt dht.RoutingTable
@ -67,7 +69,7 @@ func TestQuery(t *testing.T) {
},
}
for i, v := range cases {
req := pb.QueryRequest{Pingback: true, Sender: sender, Target: &pb.Node{Id: "B"}, Limit: int64(2)}
req := pb.QueryRequest{Pingback: true, Sender: sender, Target: &pb.Node{Id: teststorj.NodeIDFromString("B")}, Limit: int64(2)}
mockDHT.EXPECT().GetRoutingTable(gomock.Any()).Return(v.rt, v.getRTErr)
mockDHT.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(v.pingNode, v.pingErr)
if v.pingErr != nil {

View File

@ -9,8 +9,9 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/statdb"
"storj.io/storj/storage"
@ -43,8 +44,8 @@ func NewOverlayCache(db storage.KeyValueStore, dht dht.DHT, sdb *statdb.Server)
}
// Get looks up the provided nodeID from the overlay cache
func (o *Cache) Get(ctx context.Context, key string) (*pb.Node, error) {
b, err := o.DB.Get([]byte(key))
func (o *Cache) Get(ctx context.Context, nodeID storj.NodeID) (*pb.Node, error) {
b, err := o.DB.Get(nodeID.Bytes())
if err != nil {
return nil, err
}
@ -60,13 +61,13 @@ func (o *Cache) Get(ctx context.Context, key string) (*pb.Node, error) {
}
// GetAll looks up the provided nodeIDs from the overlay cache
func (o *Cache) GetAll(ctx context.Context, keys []string) ([]*pb.Node, error) {
if len(keys) == 0 {
return nil, OverlayError.New("no keys provided")
func (o *Cache) GetAll(ctx context.Context, nodeIDs storj.NodeIDList) ([]*pb.Node, error) {
if len(nodeIDs) == 0 {
return nil, OverlayError.New("no nodeIDs provided")
}
var ks storage.Keys
for _, v := range keys {
ks = append(ks, storage.Key(v))
for _, v := range nodeIDs {
ks = append(ks, v.Bytes())
}
vs, err := o.DB.GetAll(ks)
if err != nil {
@ -89,10 +90,10 @@ func (o *Cache) GetAll(ctx context.Context, keys []string) ([]*pb.Node, error) {
}
// Put adds a nodeID to the redis cache with a binary representation of proto defined Node
func (o *Cache) Put(nodeID string, value pb.Node) error {
func (o *Cache) Put(nodeID storj.NodeID, value pb.Node) error {
// If we get a Node without an ID (i.e. bootstrap node)
// we don't want to add to the routing table
if nodeID == "" {
if nodeID == (storj.NodeID{}) {
return nil
}
@ -101,7 +102,7 @@ func (o *Cache) Put(nodeID string, value pb.Node) error {
return err
}
return o.DB.Put(node.IDFromString(nodeID).Bytes(), data)
return o.DB.Put(nodeID.Bytes(), data)
}
// Bootstrap walks the initialized network and populates the cache
@ -123,7 +124,7 @@ func (o *Cache) Refresh(ctx context.Context) error {
nodes := o.DHT.Seen()
for _, v := range nodes {
if err := o.Put(v.GetId(), *v); err != nil {
if err := o.Put(v.Id, *v); err != nil {
return err
}
}

View File

@ -9,6 +9,9 @@ import (
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/storj"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testplanet"
"storj.io/storj/pkg/overlay"
@ -20,63 +23,70 @@ import (
"storj.io/storj/storage/teststore"
)
var (
valid1ID = teststorj.NodeIDFromString("valid1")
valid2ID = teststorj.NodeIDFromString("valid2")
invalid1ID = teststorj.NodeIDFromString("invalid1")
invalid2ID = teststorj.NodeIDFromString("invalid2")
)
func testCache(ctx context.Context, t *testing.T, store storage.KeyValueStore) {
cache := overlay.Cache{DB: store}
{ // Put
err := cache.Put("valid1", pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9001"}})
err := cache.Put(valid1ID, pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9001"}})
if err != nil {
t.Fatal(err)
}
err = cache.Put("valid2", pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9002"}})
err = cache.Put(valid2ID, pb.Node{Address: &pb.NodeAddress{Transport: pb.NodeTransport_TCP_TLS_GRPC, Address: "127.0.0.1:9002"}})
if err != nil {
t.Fatal(err)
}
}
{ // Get
valid2, err := cache.Get(ctx, "valid2")
valid2, err := cache.Get(ctx, valid2ID)
if assert.NoError(t, err) {
assert.Equal(t, valid2.Address.Address, "127.0.0.1:9002")
}
invalid2, err := cache.Get(ctx, "invalid2")
invalid2, err := cache.Get(ctx, invalid2ID)
assert.Error(t, err)
assert.Nil(t, invalid2)
if storeClient, ok := store.(*teststore.Client); ok {
storeClient.ForceError++
_, err := cache.Get(ctx, "valid1")
_, err := cache.Get(ctx, valid1ID)
assert.Error(t, err)
}
}
{ // GetAll
nodes, err := cache.GetAll(ctx, []string{"valid2", "valid1", "valid2"})
nodes, err := cache.GetAll(ctx, storj.NodeIDList{valid2ID, valid1ID, valid2ID})
if assert.NoError(t, err) {
assert.Equal(t, nodes[0].Address.Address, "127.0.0.1:9002")
assert.Equal(t, nodes[1].Address.Address, "127.0.0.1:9001")
assert.Equal(t, nodes[2].Address.Address, "127.0.0.1:9002")
}
nodes, err = cache.GetAll(ctx, []string{"valid1", "invalid"})
nodes, err = cache.GetAll(ctx, storj.NodeIDList{valid1ID, invalid1ID})
if assert.NoError(t, err) {
assert.Equal(t, nodes[0].Address.Address, "127.0.0.1:9001")
assert.Nil(t, nodes[1])
}
nodes, err = cache.GetAll(ctx, []string{"", ""})
nodes, err = cache.GetAll(ctx, make(storj.NodeIDList, 2))
if assert.NoError(t, err) {
assert.Nil(t, nodes[0])
assert.Nil(t, nodes[1])
}
_, err = cache.GetAll(ctx, []string{})
_, err = cache.GetAll(ctx, storj.NodeIDList{})
assert.True(t, overlay.OverlayError.Has(err))
if storeClient, ok := store.(*teststore.Client); ok {
storeClient.ForceError++
_, err := cache.GetAll(ctx, []string{"valid1", "valid2"})
_, err := cache.GetAll(ctx, storj.NodeIDList{valid1ID, valid2ID})
assert.Error(t, err)
}
}

View File

@ -8,11 +8,10 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/transport"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
)
// Client is the interface that defines an overlay client.
@ -29,8 +28,8 @@ var ClientError = errs.Class("Client Error")
//Client implements the Overlay Client interface
type Client interface {
Choose(ctx context.Context, op Options) ([]*pb.Node, error)
Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error)
BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error)
Lookup(ctx context.Context, nodeID storj.NodeID) (*pb.Node, error)
BulkLookup(ctx context.Context, nodeIDs storj.NodeIDList) ([]*pb.Node, error)
}
// Overlay is the overlay concrete implementation of the client interface
@ -42,7 +41,7 @@ type Overlay struct {
type Options struct {
Amount int
Space int64
Excluded []dht.NodeID
Excluded storj.NodeIDList
}
// NewOverlayClient returns a new initialized Overlay Client
@ -66,10 +65,8 @@ var _ Client = (*Overlay)(nil)
// Choose implements the client.Choose interface
func (o *Overlay) Choose(ctx context.Context, op Options) ([]*pb.Node, error) {
var exIDs []string
for _, id := range op.Excluded {
exIDs = append(exIDs, id.String())
}
var exIDs storj.NodeIDList
exIDs = append(exIDs, op.Excluded...)
// TODO(coyle): We will also need to communicate with the reputation service here
resp, err := o.client.FindStorageNodes(ctx, &pb.FindStorageNodesRequest{
Opts: &pb.OverlayOptions{
@ -86,8 +83,8 @@ func (o *Overlay) Choose(ctx context.Context, op Options) ([]*pb.Node, error) {
}
// Lookup provides a Node with the given ID
func (o *Overlay) Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error) {
resp, err := o.client.Lookup(ctx, &pb.LookupRequest{NodeID: nodeID.String()})
func (o *Overlay) Lookup(ctx context.Context, nodeID storj.NodeID) (*pb.Node, error) {
resp, err := o.client.Lookup(ctx, &pb.LookupRequest{NodeId: nodeID})
if err != nil {
return nil, err
}
@ -95,11 +92,11 @@ func (o *Overlay) Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, erro
return resp.GetNode(), nil
}
//BulkLookup provides a list of Nodes with the given IDs
func (o *Overlay) BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error) {
// BulkLookup provides a list of Nodes with the given IDs
func (o *Overlay) BulkLookup(ctx context.Context, nodeIDs storj.NodeIDList) ([]*pb.Node, error) {
var reqs pb.LookupRequests
for _, v := range nodeIDs {
reqs.LookupRequest = append(reqs.LookupRequest, &pb.LookupRequest{NodeID: v.String()})
reqs.LookupRequest = append(reqs.LookupRequest, &pb.LookupRequest{NodeId: v})
}
resp, err := o.client.BulkLookup(ctx, &reqs)

View File

@ -12,25 +12,16 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"storj.io/storj/internal/identity"
"storj.io/storj/internal/testcontext"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
"storj.io/storj/storage/teststore"
)
type mockNodeID struct {
}
func (m mockNodeID) String() string {
return "foobar"
}
func (m mockNodeID) Bytes() []byte {
return []byte("foobar")
}
var fooID = teststorj.NodeIDFromString("foo")
func TestNewOverlayClient(t *testing.T) {
ctx := testcontext.New(t)
@ -45,7 +36,7 @@ func TestNewOverlayClient(t *testing.T) {
}
for _, v := range cases {
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -69,28 +60,32 @@ func TestChoose(t *testing.T) {
limit int
space int64
allNodes []*pb.Node
excluded []dht.NodeID
excluded storj.NodeIDList
}{
{
limit: 4,
space: 0,
allNodes: func() []*pb.Node {
n1 := &pb.Node{Id: "n1", Type: pb.NodeType_STORAGE}
n2 := &pb.Node{Id: "n2", Type: pb.NodeType_STORAGE}
n3 := &pb.Node{Id: "n3", Type: pb.NodeType_STORAGE}
n4 := &pb.Node{Id: "n4", Type: pb.NodeType_STORAGE}
n5 := &pb.Node{Id: "n5", Type: pb.NodeType_STORAGE}
n6 := &pb.Node{Id: "n6", Type: pb.NodeType_STORAGE}
n7 := &pb.Node{Id: "n7", Type: pb.NodeType_STORAGE}
n8 := &pb.Node{Id: "n8", Type: pb.NodeType_STORAGE}
return []*pb.Node{n1, n2, n3, n4, n5, n6, n7, n8}
n1 := teststorj.MockNode("n1")
n2 := teststorj.MockNode("n2")
n3 := teststorj.MockNode("n3")
n4 := teststorj.MockNode("n4")
n5 := teststorj.MockNode("n5")
n6 := teststorj.MockNode("n6")
n7 := teststorj.MockNode("n7")
n8 := teststorj.MockNode("n8")
nodes := []*pb.Node{n1, n2, n3, n4, n5, n6, n7, n8}
for _, n := range nodes {
n.Type = pb.NodeType_STORAGE
}
return nodes
}(),
excluded: func() []dht.NodeID {
id1 := node.IDFromString("n1")
id2 := node.IDFromString("n2")
id3 := node.IDFromString("n3")
id4 := node.IDFromString("n4")
return []dht.NodeID{id1, id2, id3, id4}
excluded: func() storj.NodeIDList {
id1 := teststorj.NodeIDFromString("n1")
id2 := teststorj.NodeIDFromString("n2")
id3 := teststorj.NodeIDFromString("n3")
id4 := teststorj.NodeIDFromString("n4")
return storj.NodeIDList{id1, id2, id3, id4}
}(),
},
}
@ -104,12 +99,12 @@ func TestChoose(t *testing.T) {
data, err := proto.Marshal(n)
assert.NoError(t, err)
listItems = append(listItems, storage.ListItem{
Key: storage.Key(n.Id),
Key: n.Id.Bytes(),
Value: data,
})
}
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -146,11 +141,11 @@ func TestLookup(t *testing.T) {
defer ctx.Cleanup()
cases := []struct {
nodeID dht.NodeID
nodeID storj.NodeID
expectedCalls int
}{
{
nodeID: mockNodeID{},
nodeID: fooID,
expectedCalls: 1,
},
}
@ -164,7 +159,7 @@ func TestLookup(t *testing.T) {
go func() { assert.NoError(t, srv.Serve(lis)) }()
defer srv.Stop()
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -189,11 +184,11 @@ func TestBulkLookup(t *testing.T) {
defer ctx.Cleanup()
cases := []struct {
nodeIDs []dht.NodeID
nodeIDs storj.NodeIDList
expectedCalls int
}{
{
nodeIDs: []dht.NodeID{mockNodeID{}, mockNodeID{}, mockNodeID{}},
nodeIDs: storj.NodeIDList{fooID, fooID, fooID},
expectedCalls: 1,
},
}
@ -206,7 +201,7 @@ func TestBulkLookup(t *testing.T) {
go func() { assert.NoError(t, srv.Serve(lis)) }()
defer srv.Stop()
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -238,7 +233,7 @@ func TestBulkLookupV2(t *testing.T) {
go func() { assert.NoError(t, srv.Serve(lis)) }()
defer srv.Stop()
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)
@ -250,46 +245,46 @@ func TestBulkLookupV2(t *testing.T) {
overlay, ok := oc.(*Overlay)
assert.True(t, ok)
assert.NotEmpty(t, overlay.client)
n1 := &pb.Node{Id: "n1"}
n2 := &pb.Node{Id: "n2"}
n3 := &pb.Node{Id: "n3"}
n1 := teststorj.MockNode("n1")
n2 := teststorj.MockNode("n2")
n3 := teststorj.MockNode("n3")
nodes := []*pb.Node{n1, n2, n3}
for _, n := range nodes {
assert.NoError(t, s.cache.Put(n.Id, *n))
}
nid1 := node.IDFromString("n1")
nid2 := node.IDFromString("n2")
nid3 := node.IDFromString("n3")
nid4 := node.IDFromString("n4")
nid5 := node.IDFromString("n5")
nid1 := teststorj.NodeIDFromString("n1")
nid2 := teststorj.NodeIDFromString("n2")
nid3 := teststorj.NodeIDFromString("n3")
nid4 := teststorj.NodeIDFromString("n4")
nid5 := teststorj.NodeIDFromString("n5")
{ // empty id
_, err := oc.BulkLookup(ctx, []dht.NodeID{})
_, err := oc.BulkLookup(ctx, storj.NodeIDList{})
assert.Error(t, err)
}
{ // valid ids
ns, err := oc.BulkLookup(ctx, []dht.NodeID{nid1, nid2, nid3})
ns, err := oc.BulkLookup(ctx, storj.NodeIDList{nid1, nid2, nid3})
assert.NoError(t, err)
assert.Equal(t, nodes, ns)
}
{ // missing ids
ns, err := oc.BulkLookup(ctx, []dht.NodeID{nid4, nid5})
ns, err := oc.BulkLookup(ctx, storj.NodeIDList{nid4, nid5})
assert.NoError(t, err)
assert.Equal(t, []*pb.Node{nil, nil}, ns)
}
{ // different order and missing
ns, err := oc.BulkLookup(ctx, []dht.NodeID{nid3, nid4, nid1, nid2, nid5})
ns, err := oc.BulkLookup(ctx, storj.NodeIDList{nid3, nid4, nid1, nid2, nid5})
assert.NoError(t, err)
assert.Equal(t, []*pb.Node{n3, nil, n1, n2, nil}, ns)
}
}
func newServer(ctx context.Context) (*grpc.Server, *Server, error) {
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
if err != nil {
return nil, nil, err
}
@ -311,7 +306,7 @@ func newServer(ctx context.Context) (*grpc.Server, *Server, error) {
}
func newTestServer(ctx context.Context) (*grpc.Server, *mockOverlayServer, error) {
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
if err != nil {
return nil, nil, err
}

View File

@ -5,11 +5,14 @@ package overlay
import (
"context"
"strings"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/kademlia"
"storj.io/storj/pkg/pb"
@ -34,6 +37,12 @@ type Config struct {
RefreshInterval time.Duration `help:"the interval at which the cache refreshes itself in seconds" default:"1s"`
}
// LookupConfig is a configuration struct for querying the overlay cache with one or more node IDs
type LookupConfig struct {
NodeIDsString string `help:"one or more string-encoded node IDs, delimited by Delimiter"`
Delimiter string `help:"delimiter used for parsing node IDs" default:","`
}
// CtxKey used for assigning cache and server
type CtxKey int
@ -131,3 +140,21 @@ func LoadServerFromContext(ctx context.Context) *Server {
}
return nil
}
// ParseIDs converts the base58check encoded node ID strings from the config into node IDs
func (c LookupConfig) ParseIDs() (ids storj.NodeIDList, err error) {
	var parseErrs []error
	for _, encoded := range strings.Split(c.NodeIDsString, c.Delimiter) {
		parsed, parseErr := storj.NodeIDFromString(encoded)
		if parseErr != nil {
			// Collect every bad entry so the caller sees all failures at once.
			parseErrs = append(parseErrs, parseErr)
			continue
		}
		ids = append(ids, parsed)
	}
	if combined := utils.CombineErrors(parseErrs...); combined != nil {
		return nil, combined
	}
	return ids, nil
}

View File

@ -10,9 +10,9 @@ import (
gomock "github.com/golang/mock/gomock"
dht "storj.io/storj/pkg/dht"
x "storj.io/storj/pkg/overlay"
pb "storj.io/storj/pkg/pb"
storj "storj.io/storj/pkg/storj"
)
// MockClient is a mock of Client interface
@ -52,7 +52,7 @@ func (mr *MockClientMockRecorder) Choose(ctx, op interface{}) *gomock.Call {
}
// Lookup mocks base method
func (m *MockClient) Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error) {
func (m *MockClient) Lookup(ctx context.Context, nodeID storj.NodeID) (*pb.Node, error) {
ret := m.ctrl.Call(m, "Lookup", ctx, nodeID)
ret0, _ := ret[0].(*pb.Node)
ret1, _ := ret[1].(error)
@ -65,7 +65,7 @@ func (mr *MockClientMockRecorder) Lookup(ctx, nodeID interface{}) *gomock.Call {
}
// BulkLookup mocks base method
func (m *MockClient) BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error) {
func (m *MockClient) BulkLookup(ctx context.Context, nodeIDs storj.NodeIDList) ([]*pb.Node, error) {
ret := m.ctrl.Call(m, "BulkLookup", ctx, nodeIDs)
ret0, _ := ret[0].([]*pb.Node)
ret1, _ := ret[1].(error)

View File

@ -10,18 +10,20 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
)
// Overlay is a mocked overlay implementation
type Overlay struct {
nodes map[string]*pb.Node
nodes map[storj.NodeID]*pb.Node
}
// NewOverlay returns a newly initialized mock overlay
func NewOverlay(nodes []*pb.Node) *Overlay {
rv := &Overlay{nodes: map[string]*pb.Node{}}
rv := &Overlay{nodes: map[storj.NodeID]*pb.Node{}}
for _, node := range nodes {
rv.nodes[node.Id] = node
}
@ -52,7 +54,7 @@ func (mo *Overlay) FindStorageNodes(ctx context.Context, req *pb.FindStorageNode
// Lookup finds a single storage node based on the request
func (mo *Overlay) Lookup(ctx context.Context, req *pb.LookupRequest) (
*pb.LookupResponse, error) {
return &pb.LookupResponse{Node: mo.nodes[req.NodeID]}, nil
return &pb.LookupResponse{Node: mo.nodes[req.NodeId]}, nil
}
//BulkLookup finds multiple storage nodes based on the requests
@ -61,7 +63,7 @@ func (mo *Overlay) BulkLookup(ctx context.Context, reqs *pb.LookupRequests) (
var responses []*pb.LookupResponse
for _, r := range reqs.LookupRequest {
// NOTE (Dylan): tests did not catch missing node case, need updating
n := mo.nodes[r.NodeID]
n := mo.nodes[r.NodeId]
resp := &pb.LookupResponse{Node: n}
responses = append(responses, resp)
}
@ -81,7 +83,11 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) error {
if len(parts) != 2 {
return fmt.Errorf("malformed node config: %#v", nodestr)
}
id, addr := parts[0], parts[1]
id, err := storj.NodeIDFromString(parts[0])
if err != nil {
return err
}
addr := parts[1]
nodes = append(nodes, &pb.Node{
Id: id,
Address: &pb.NodeAddress{

View File

@ -4,6 +4,7 @@
package overlay
import (
"bytes"
"context"
"fmt"
@ -14,6 +15,8 @@ import (
"google.golang.org/grpc/status"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/pb"
"storj.io/storj/storage"
@ -42,10 +45,10 @@ func NewServer(log *zap.Logger, cache *Cache, dht dht.DHT) *Server {
// Lookup finds the address of a node in our overlay network
func (o *Server) Lookup(ctx context.Context, req *pb.LookupRequest) (*pb.LookupResponse, error) {
na, err := o.cache.Get(ctx, req.NodeID)
na, err := o.cache.Get(ctx, req.NodeId)
if err != nil {
o.logger.Error("Error looking up node", zap.Error(err), zap.String("nodeID", req.NodeID))
o.logger.Error("Error looking up node", zap.Error(err), zap.String("nodeID", req.NodeId.String()))
return nil, err
}
@ -71,16 +74,16 @@ func (o *Server) FindStorageNodes(ctx context.Context, req *pb.FindStorageNodesR
maxNodes = opts.GetAmount()
}
excluded := opts.GetExcludedNodes()
excluded := opts.ExcludedNodes
restrictions := opts.GetRestrictions()
restrictedBandwidth := restrictions.GetFreeBandwidth()
restrictedSpace := restrictions.GetFreeDisk()
var start storage.Key
var startID storj.NodeID
result := []*pb.Node{}
for {
var nodes []*pb.Node
nodes, start, err = o.populate(ctx, req.GetStart(), maxNodes, restrictedBandwidth, restrictedSpace, excluded)
nodes, startID, err = o.populate(ctx, req.Start, maxNodes, restrictedBandwidth, restrictedSpace, excluded)
if err != nil {
return nil, Error.Wrap(err)
}
@ -91,7 +94,7 @@ func (o *Server) FindStorageNodes(ctx context.Context, req *pb.FindStorageNodesR
result = append(result, nodes...)
if len(result) >= int(maxNodes) || start == nil {
if len(result) >= int(maxNodes) || startID == (storj.NodeID{}) {
break
}
@ -130,24 +133,25 @@ func (o *Server) getNodes(ctx context.Context, keys storage.Keys) ([]*pb.Node, e
}
func (o *Server) populate(ctx context.Context, starting storage.Key, maxNodes, restrictedBandwidth, restrictedSpace int64, excluded []string) ([]*pb.Node, storage.Key, error) {
func (o *Server) populate(ctx context.Context, startID storj.NodeID, maxNodes, restrictedBandwidth, restrictedSpace int64, excluded storj.NodeIDList) ([]*pb.Node, storj.NodeID, error) {
limit := int(maxNodes * 2)
keys, err := o.cache.DB.List(starting, limit)
keys, err := o.cache.DB.List(startID.Bytes(), limit)
if err != nil {
o.logger.Error("Error listing nodes", zap.Error(err))
return nil, nil, Error.Wrap(err)
return nil, storj.NodeID{}, Error.Wrap(err)
}
if len(keys) <= 0 {
o.logger.Info("No Keys returned from List operation")
return []*pb.Node{}, starting, nil
return []*pb.Node{}, startID, nil
}
// TODO: should this be `var result []*pb.Node` ?
result := []*pb.Node{}
nodes, err := o.getNodes(ctx, keys)
if err != nil {
o.logger.Error("Error getting nodes", zap.Error(err))
return nil, nil, Error.Wrap(err)
return nil, storj.NodeID{}, Error.Wrap(err)
}
for _, v := range nodes {
@ -165,29 +169,33 @@ func (o *Server) populate(ctx context.Context, starting storage.Key, maxNodes, r
result = append(result, v)
}
nextStart := keys[len(keys)-1]
var nextStart storj.NodeID
if len(keys) < limit {
nextStart = nil
nextStart = storj.NodeID{}
} else {
nextStart, err = storj.NodeIDFromBytes(keys[len(keys)-1])
}
if err != nil {
return nil, storj.NodeID{}, Error.Wrap(err)
}
return result, nextStart, nil
}
// contains checks if item exists in list
func contains(list []string, item string) bool {
for _, listItem := range list {
if listItem == item {
// contains reports whether searchID appears in nodeIDs.
func contains(nodeIDs storj.NodeIDList, searchID storj.NodeID) bool {
	// Serialize the needle once instead of on every loop iteration;
	// each comparison previously re-encoded searchID via Bytes().
	want := searchID.Bytes()
	for _, id := range nodeIDs {
		if bytes.Equal(id.Bytes(), want) {
			return true
		}
	}
	return false
}
//lookupRequestsToNodeIDs returns the nodeIDs from the LookupRequests
func lookupRequestsToNodeIDs(reqs *pb.LookupRequests) []string {
var ids []string
// lookupRequestsToNodeIDs returns the nodeIDs from the LookupRequests
func lookupRequestsToNodeIDs(reqs *pb.LookupRequests) (ids storj.NodeIDList) {
for _, v := range reqs.LookupRequest {
ids = append(ids, v.NodeID)
ids = append(ids, v.NodeId)
}
return ids
}

View File

@ -42,7 +42,7 @@ func TestServer(t *testing.T) {
}
{ // Lookup
result, err := server.Lookup(ctx, &pb.LookupRequest{NodeID: planet.StorageNodes[0].ID()})
result, err := server.Lookup(ctx, &pb.LookupRequest{NodeId: planet.StorageNodes[0].ID()})
if assert.NoError(t, err) && assert.NotNil(t, result) {
assert.Equal(t, result.Node.Address.Address, planet.StorageNodes[0].Addr())
}
@ -51,9 +51,9 @@ func TestServer(t *testing.T) {
{ // BulkLookup
result, err := server.BulkLookup(ctx, &pb.LookupRequests{
LookupRequest: []*pb.LookupRequest{
{NodeID: planet.StorageNodes[0].ID()},
{NodeID: planet.StorageNodes[1].ID()},
{NodeID: planet.StorageNodes[2].ID()},
{NodeId: planet.StorageNodes[0].ID()},
{NodeId: planet.StorageNodes[1].ID()},
{NodeId: planet.StorageNodes[2].ID()},
},
})

View File

@ -41,7 +41,7 @@ func (x AgreementsSummary_Status) String() string {
return proto.EnumName(AgreementsSummary_Status_name, int32(x))
}
func (AgreementsSummary_Status) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_bandwidth_01db992f91c47bae, []int{0, 0}
return fileDescriptor_bandwidth_acc7ad1b0a6a13c6, []int{0, 0}
}
type AgreementsSummary struct {
@ -55,7 +55,7 @@ func (m *AgreementsSummary) Reset() { *m = AgreementsSummary{} }
func (m *AgreementsSummary) String() string { return proto.CompactTextString(m) }
func (*AgreementsSummary) ProtoMessage() {}
func (*AgreementsSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_bandwidth_01db992f91c47bae, []int{0}
return fileDescriptor_bandwidth_acc7ad1b0a6a13c6, []int{0}
}
func (m *AgreementsSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AgreementsSummary.Unmarshal(m, b)
@ -159,9 +159,9 @@ var _Bandwidth_serviceDesc = grpc.ServiceDesc{
Metadata: "bandwidth.proto",
}
func init() { proto.RegisterFile("bandwidth.proto", fileDescriptor_bandwidth_01db992f91c47bae) }
func init() { proto.RegisterFile("bandwidth.proto", fileDescriptor_bandwidth_acc7ad1b0a6a13c6) }
var fileDescriptor_bandwidth_01db992f91c47bae = []byte{
var fileDescriptor_bandwidth_acc7ad1b0a6a13c6 = []byte{
// 196 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4f, 0x4a, 0xcc, 0x4b,
0x29, 0xcf, 0x4c, 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x0b, 0x48,

View File

@ -31,7 +31,7 @@ func (m *InjuredSegment) Reset() { *m = InjuredSegment{} }
func (m *InjuredSegment) String() string { return proto.CompactTextString(m) }
func (*InjuredSegment) ProtoMessage() {}
func (*InjuredSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_datarepair_13e4beab54f194bd, []int{0}
return fileDescriptor_datarepair_ed86aa1a63e3d6e4, []int{0}
}
func (m *InjuredSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InjuredSegment.Unmarshal(m, b)
@ -69,9 +69,9 @@ func init() {
proto.RegisterType((*InjuredSegment)(nil), "repair.InjuredSegment")
}
func init() { proto.RegisterFile("datarepair.proto", fileDescriptor_datarepair_13e4beab54f194bd) }
func init() { proto.RegisterFile("datarepair.proto", fileDescriptor_datarepair_ed86aa1a63e3d6e4) }
var fileDescriptor_datarepair_13e4beab54f194bd = []byte{
var fileDescriptor_datarepair_ed86aa1a63e3d6e4 = []byte{
// 119 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x49, 0x2c, 0x49,
0x2c, 0x4a, 0x2d, 0x48, 0xcc, 0x2c, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0,

View File

@ -8,4 +8,10 @@ import "storj.io/storj/pkg/storj"
// Path represents a object path
type Path = storj.Path
//go:generate protoc -I. --gogo_out=plugins=grpc:. meta.proto overlay.proto pointerdb.proto piecestore.proto bandwidth.proto inspector.proto datarepair.proto
// NodeID is an alias to storj.NodeID for use in generated protobuf code
type NodeID = storj.NodeID
// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code
type NodeIDList = storj.NodeIDList
//go:generate protoc -I. --gogo_out=plugins=grpc:. meta.proto overlay.proto pointerdb.proto piecestore.proto bandwidth.proto inspector.proto datarepair.proto node.proto

144
pkg/pb/gogo.proto Normal file
View File

@ -0,0 +1,144 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package gogoproto;
import "google/protobuf/descriptor.proto";
option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/gogo/protobuf/gogoproto";
extend google.protobuf.EnumOptions {
optional bool goproto_enum_prefix = 62001;
optional bool goproto_enum_stringer = 62021;
optional bool enum_stringer = 62022;
optional string enum_customname = 62023;
optional bool enumdecl = 62024;
}
extend google.protobuf.EnumValueOptions {
optional string enumvalue_customname = 66001;
}
extend google.protobuf.FileOptions {
optional bool goproto_getters_all = 63001;
optional bool goproto_enum_prefix_all = 63002;
optional bool goproto_stringer_all = 63003;
optional bool verbose_equal_all = 63004;
optional bool face_all = 63005;
optional bool gostring_all = 63006;
optional bool populate_all = 63007;
optional bool stringer_all = 63008;
optional bool onlyone_all = 63009;
optional bool equal_all = 63013;
optional bool description_all = 63014;
optional bool testgen_all = 63015;
optional bool benchgen_all = 63016;
optional bool marshaler_all = 63017;
optional bool unmarshaler_all = 63018;
optional bool stable_marshaler_all = 63019;
optional bool sizer_all = 63020;
optional bool goproto_enum_stringer_all = 63021;
optional bool enum_stringer_all = 63022;
optional bool unsafe_marshaler_all = 63023;
optional bool unsafe_unmarshaler_all = 63024;
optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
optional bool compare_all = 63029;
optional bool typedecl_all = 63030;
optional bool enumdecl_all = 63031;
optional bool goproto_registration = 63032;
optional bool messagename_all = 63033;
optional bool goproto_sizecache_all = 63034;
optional bool goproto_unkeyed_all = 63035;
}
extend google.protobuf.MessageOptions {
optional bool goproto_getters = 64001;
optional bool goproto_stringer = 64003;
optional bool verbose_equal = 64004;
optional bool face = 64005;
optional bool gostring = 64006;
optional bool populate = 64007;
optional bool stringer = 67008;
optional bool onlyone = 64009;
optional bool equal = 64013;
optional bool description = 64014;
optional bool testgen = 64015;
optional bool benchgen = 64016;
optional bool marshaler = 64017;
optional bool unmarshaler = 64018;
optional bool stable_marshaler = 64019;
optional bool sizer = 64020;
optional bool unsafe_marshaler = 64023;
optional bool unsafe_unmarshaler = 64024;
optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
optional bool typedecl = 64030;
optional bool messagename = 64033;
optional bool goproto_sizecache = 64034;
optional bool goproto_unkeyed = 64035;
}
extend google.protobuf.FieldOptions {
optional bool nullable = 65001;
optional bool embed = 65002;
optional string customtype = 65003;
optional string customname = 65004;
optional string jsontag = 65005;
optional string moretags = 65006;
optional string casttype = 65007;
optional string castkey = 65008;
optional string castvalue = 65009;
optional bool stdtime = 65010;
optional bool stdduration = 65011;
optional bool wktpointer = 65012;
optional bool compare = 65013;
}

View File

@ -6,6 +6,7 @@ package pb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@ -23,7 +24,7 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// GetStats
type GetStatsRequest struct {
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -33,7 +34,7 @@ func (m *GetStatsRequest) Reset() { *m = GetStatsRequest{} }
func (m *GetStatsRequest) String() string { return proto.CompactTextString(m) }
func (*GetStatsRequest) ProtoMessage() {}
func (*GetStatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{0}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{0}
}
func (m *GetStatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetStatsRequest.Unmarshal(m, b)
@ -53,13 +54,6 @@ func (m *GetStatsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_GetStatsRequest proto.InternalMessageInfo
func (m *GetStatsRequest) GetNodeId() string {
if m != nil {
return m.NodeId
}
return ""
}
type GetStatsResponse struct {
AuditCount int64 `protobuf:"varint,1,opt,name=audit_count,json=auditCount,proto3" json:"audit_count,omitempty"`
UptimeRatio float64 `protobuf:"fixed64,2,opt,name=uptime_ratio,json=uptimeRatio,proto3" json:"uptime_ratio,omitempty"`
@ -73,7 +67,7 @@ func (m *GetStatsResponse) Reset() { *m = GetStatsResponse{} }
func (m *GetStatsResponse) String() string { return proto.CompactTextString(m) }
func (*GetStatsResponse) ProtoMessage() {}
func (*GetStatsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{1}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{1}
}
func (m *GetStatsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetStatsResponse.Unmarshal(m, b)
@ -116,7 +110,7 @@ func (m *GetStatsResponse) GetAuditRatio() float64 {
// CreateStats
type CreateStatsRequest struct {
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
AuditCount int64 `protobuf:"varint,2,opt,name=audit_count,json=auditCount,proto3" json:"audit_count,omitempty"`
AuditSuccessCount int64 `protobuf:"varint,3,opt,name=audit_success_count,json=auditSuccessCount,proto3" json:"audit_success_count,omitempty"`
UptimeCount int64 `protobuf:"varint,4,opt,name=uptime_count,json=uptimeCount,proto3" json:"uptime_count,omitempty"`
@ -130,7 +124,7 @@ func (m *CreateStatsRequest) Reset() { *m = CreateStatsRequest{} }
func (m *CreateStatsRequest) String() string { return proto.CompactTextString(m) }
func (*CreateStatsRequest) ProtoMessage() {}
func (*CreateStatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{2}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{2}
}
func (m *CreateStatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateStatsRequest.Unmarshal(m, b)
@ -150,13 +144,6 @@ func (m *CreateStatsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_CreateStatsRequest proto.InternalMessageInfo
func (m *CreateStatsRequest) GetNodeId() string {
if m != nil {
return m.NodeId
}
return ""
}
func (m *CreateStatsRequest) GetAuditCount() int64 {
if m != nil {
return m.AuditCount
@ -195,7 +182,7 @@ func (m *CreateStatsResponse) Reset() { *m = CreateStatsResponse{} }
func (m *CreateStatsResponse) String() string { return proto.CompactTextString(m) }
func (*CreateStatsResponse) ProtoMessage() {}
func (*CreateStatsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{3}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{3}
}
func (m *CreateStatsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateStatsResponse.Unmarshal(m, b)
@ -228,7 +215,7 @@ func (m *CountNodesResponse) Reset() { *m = CountNodesResponse{} }
func (m *CountNodesResponse) String() string { return proto.CompactTextString(m) }
func (*CountNodesResponse) ProtoMessage() {}
func (*CountNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{4}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{4}
}
func (m *CountNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesResponse.Unmarshal(m, b)
@ -272,7 +259,7 @@ func (m *CountNodesRequest) Reset() { *m = CountNodesRequest{} }
func (m *CountNodesRequest) String() string { return proto.CompactTextString(m) }
func (*CountNodesRequest) ProtoMessage() {}
func (*CountNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{5}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{5}
}
func (m *CountNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesRequest.Unmarshal(m, b)
@ -303,7 +290,7 @@ func (m *GetBucketsRequest) Reset() { *m = GetBucketsRequest{} }
func (m *GetBucketsRequest) String() string { return proto.CompactTextString(m) }
func (*GetBucketsRequest) ProtoMessage() {}
func (*GetBucketsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{6}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{6}
}
func (m *GetBucketsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketsRequest.Unmarshal(m, b)
@ -325,7 +312,7 @@ var xxx_messageInfo_GetBucketsRequest proto.InternalMessageInfo
type GetBucketsResponse struct {
Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
Ids [][]byte `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"`
Ids []NodeID `protobuf:"bytes,2,rep,name=ids,customtype=NodeID" json:"ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -335,7 +322,7 @@ func (m *GetBucketsResponse) Reset() { *m = GetBucketsResponse{} }
func (m *GetBucketsResponse) String() string { return proto.CompactTextString(m) }
func (*GetBucketsResponse) ProtoMessage() {}
func (*GetBucketsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{7}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{7}
}
func (m *GetBucketsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketsResponse.Unmarshal(m, b)
@ -362,16 +349,9 @@ func (m *GetBucketsResponse) GetTotal() int64 {
return 0
}
func (m *GetBucketsResponse) GetIds() [][]byte {
if m != nil {
return m.Ids
}
return nil
}
// GetBucket
type GetBucketRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -381,7 +361,7 @@ func (m *GetBucketRequest) Reset() { *m = GetBucketRequest{} }
func (m *GetBucketRequest) String() string { return proto.CompactTextString(m) }
func (*GetBucketRequest) ProtoMessage() {}
func (*GetBucketRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{8}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{8}
}
func (m *GetBucketRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketRequest.Unmarshal(m, b)
@ -401,15 +381,8 @@ func (m *GetBucketRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_GetBucketRequest proto.InternalMessageInfo
func (m *GetBucketRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type GetBucketResponse struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Nodes []*Node `protobuf:"bytes,2,rep,name=nodes" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -420,7 +393,7 @@ func (m *GetBucketResponse) Reset() { *m = GetBucketResponse{} }
func (m *GetBucketResponse) String() string { return proto.CompactTextString(m) }
func (*GetBucketResponse) ProtoMessage() {}
func (*GetBucketResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{9}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{9}
}
func (m *GetBucketResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketResponse.Unmarshal(m, b)
@ -440,13 +413,6 @@ func (m *GetBucketResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_GetBucketResponse proto.InternalMessageInfo
func (m *GetBucketResponse) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *GetBucketResponse) GetNodes() []*Node {
if m != nil {
return m.Nodes
@ -465,7 +431,7 @@ func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{10}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{10}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
@ -503,7 +469,7 @@ func (m *BucketList) Reset() { *m = BucketList{} }
func (m *BucketList) String() string { return proto.CompactTextString(m) }
func (*BucketList) ProtoMessage() {}
func (*BucketList) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{11}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{11}
}
func (m *BucketList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BucketList.Unmarshal(m, b)
@ -532,7 +498,7 @@ func (m *BucketList) GetNodes() []*Node {
// PingNode
type PingNodeRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -543,7 +509,7 @@ func (m *PingNodeRequest) Reset() { *m = PingNodeRequest{} }
func (m *PingNodeRequest) String() string { return proto.CompactTextString(m) }
func (*PingNodeRequest) ProtoMessage() {}
func (*PingNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{12}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{12}
}
func (m *PingNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingNodeRequest.Unmarshal(m, b)
@ -563,13 +529,6 @@ func (m *PingNodeRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_PingNodeRequest proto.InternalMessageInfo
func (m *PingNodeRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *PingNodeRequest) GetAddress() string {
if m != nil {
return m.Address
@ -588,7 +547,7 @@ func (m *PingNodeResponse) Reset() { *m = PingNodeResponse{} }
func (m *PingNodeResponse) String() string { return proto.CompactTextString(m) }
func (*PingNodeResponse) ProtoMessage() {}
func (*PingNodeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_bc69ade8473655f5, []int{13}
return fileDescriptor_inspector_9ff12d18f09cf85c, []int{13}
}
func (m *PingNodeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingNodeResponse.Unmarshal(m, b)
@ -885,42 +844,44 @@ var _Inspector_serviceDesc = grpc.ServiceDesc{
Metadata: "inspector.proto",
}
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_inspector_bc69ade8473655f5) }
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_inspector_9ff12d18f09cf85c) }
var fileDescriptor_inspector_bc69ade8473655f5 = []byte{
// 533 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x5d, 0x6f, 0xd3, 0x30,
0x14, 0x55, 0x92, 0xb5, 0x6b, 0x6f, 0x37, 0xba, 0xb9, 0x43, 0x54, 0xe9, 0x80, 0x62, 0x5e, 0x2a,
0x24, 0x2a, 0x18, 0x8f, 0xf0, 0xb4, 0x4a, 0x8c, 0xa2, 0x09, 0xa1, 0xec, 0x8d, 0x97, 0x2a, 0x8b,
0x2d, 0x64, 0xb5, 0x8b, 0x43, 0xec, 0x80, 0xf8, 0x79, 0xfc, 0x06, 0xfe, 0x10, 0xf2, 0x47, 0xe2,
0x24, 0xcd, 0xd0, 0xde, 0x72, 0xef, 0x39, 0xf7, 0x5c, 0xdf, 0x73, 0x63, 0xc3, 0x98, 0xa5, 0x22,
0xa3, 0x89, 0xe4, 0xf9, 0x32, 0xcb, 0xb9, 0xe4, 0x68, 0x58, 0x25, 0xc2, 0x63, 0xfe, 0x93, 0xe6,
0xbb, 0xf8, 0xb7, 0x41, 0xf0, 0x2b, 0x18, 0x5f, 0x51, 0x79, 0x23, 0x63, 0x29, 0x22, 0xfa, 0xa3,
0xa0, 0x42, 0xa2, 0x27, 0x70, 0x98, 0x72, 0x42, 0x37, 0x8c, 0x4c, 0xbd, 0xb9, 0xb7, 0x18, 0x46,
0x7d, 0x15, 0xae, 0x09, 0xfe, 0x05, 0x27, 0x8e, 0x2b, 0x32, 0x9e, 0x0a, 0x8a, 0x9e, 0xc3, 0x28,
0x2e, 0x08, 0x93, 0x9b, 0x84, 0x17, 0xa9, 0xd4, 0x05, 0x41, 0x04, 0x3a, 0xb5, 0x52, 0x19, 0xf4,
0x02, 0x8e, 0x8a, 0x4c, 0xb2, 0x3b, 0xba, 0xc9, 0x63, 0xc9, 0xf8, 0xd4, 0x9f, 0x7b, 0x0b, 0x2f,
0x1a, 0x99, 0x5c, 0xa4, 0x52, 0x4e, 0xc3, 0x30, 0x02, 0xcd, 0x30, 0x1a, 0x9a, 0x80, 0xff, 0x7a,
0x80, 0x56, 0x39, 0x8d, 0x25, 0x7d, 0xd0, 0x41, 0xdb, 0x87, 0xf2, 0xf7, 0x0e, 0xb5, 0x84, 0x89,
0x21, 0x88, 0x22, 0x49, 0xa8, 0x10, 0x96, 0x18, 0x68, 0xe2, 0xa9, 0x86, 0x6e, 0x0c, 0xd2, 0x1e,
0xc2, 0x10, 0x0f, 0x34, 0xd1, 0x0e, 0x61, 0x28, 0x6f, 0xe0, 0xcc, 0x52, 0x9a, 0x9a, 0x3d, 0x4d,
0x45, 0x06, 0xab, 0x8b, 0xe2, 0xc7, 0x30, 0x69, 0x0c, 0x65, 0x1c, 0xc5, 0x9f, 0x01, 0x69, 0xfc,
0x0b, 0x27, 0xd4, 0xf9, 0x1c, 0xc2, 0x60, 0x1b, 0x13, 0x7a, 0xb7, 0x63, 0xb1, 0x35, 0xb9, 0x8a,
0xd1, 0x14, 0x0e, 0xed, 0x52, 0xed, 0xa8, 0x65, 0x88, 0x27, 0x70, 0x5a, 0xd7, 0xd2, 0xb6, 0xa9,
0xe4, 0x15, 0x95, 0x97, 0x45, 0xb2, 0xa5, 0x95, 0x97, 0xf8, 0x03, 0xa0, 0x7a, 0xd2, 0x76, 0x3d,
0x83, 0x9e, 0xe4, 0x32, 0xde, 0xd9, 0x96, 0x26, 0x40, 0x27, 0x10, 0x30, 0x22, 0xa6, 0xfe, 0x3c,
0x58, 0x1c, 0x45, 0xea, 0x13, 0x63, 0xfd, 0x67, 0x98, 0xea, 0x72, 0x3b, 0x8f, 0xc0, 0xaf, 0x16,
0xe3, 0x33, 0x82, 0x3f, 0xd5, 0xda, 0x56, 0x0d, 0x5a, 0x24, 0xf4, 0x12, 0x7a, 0x6a, 0x87, 0x46,
0x7c, 0x74, 0x71, 0xbc, 0x2c, 0xff, 0x56, 0x35, 0x41, 0x64, 0x30, 0xfc, 0x1a, 0xfa, 0x46, 0xe6,
0x61, 0xf4, 0xb7, 0x00, 0x86, 0x7e, 0xcd, 0x44, 0xad, 0xc4, 0xfb, 0x4f, 0xc9, 0x7b, 0x18, 0x7f,
0x65, 0xe9, 0x77, 0x9d, 0xea, 0x1e, 0x47, 0x99, 0x1e, 0x13, 0x92, 0x53, 0x21, 0xb4, 0xe9, 0xc3,
0xa8, 0x0c, 0x95, 0x19, 0xae, 0xd8, 0xcd, 0xc9, 0xb7, 0xba, 0x7a, 0x10, 0xf9, 0x7c, 0x7b, 0xf1,
0x27, 0x80, 0xe1, 0xba, 0xbc, 0x93, 0x68, 0x0d, 0xe0, 0xd6, 0x84, 0xce, 0x97, 0xee, 0xfa, 0xee,
0x6d, 0x2f, 0x7c, 0x7a, 0x0f, 0x6a, 0x1b, 0xad, 0x01, 0xdc, 0x1e, 0x1b, 0x52, 0x7b, 0x3b, 0x6f,
0x48, 0x75, 0x2c, 0xff, 0x23, 0x0c, 0xab, 0x2c, 0x9a, 0x75, 0x71, 0x4b, 0xa1, 0xf3, 0x6e, 0xd0,
0xea, 0xac, 0x60, 0x50, 0xfa, 0x81, 0xc2, 0x1a, 0xb3, 0xe5, 0x70, 0x38, 0xeb, 0xc4, 0x9c, 0x48,
0xf9, 0xf6, 0x34, 0x44, 0x5a, 0x8f, 0x57, 0x38, 0xeb, 0xc4, 0xac, 0xc8, 0x35, 0x8c, 0x6a, 0x37,
0x0e, 0x35, 0xac, 0xdc, 0x7b, 0x5e, 0xc2, 0x67, 0xf7, 0xc1, 0x46, 0xed, 0xf2, 0xe0, 0x9b, 0x9f,
0xdd, 0xde, 0xf6, 0xf5, 0x3b, 0xfa, 0xee, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x4d, 0xa6,
0x45, 0x74, 0x05, 0x00, 0x00,
var fileDescriptor_inspector_9ff12d18f09cf85c = []byte{
// 564 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0x1a, 0x3f,
0x14, 0xc5, 0xff, 0x33, 0x13, 0x08, 0x5c, 0x50, 0x48, 0x4c, 0xfe, 0x12, 0x1a, 0x68, 0xa0, 0xde,
0x14, 0x75, 0x81, 0x2a, 0xba, 0xeb, 0x12, 0xaa, 0xa6, 0xb4, 0x51, 0x55, 0x4d, 0xd4, 0x4d, 0x37,
0x68, 0x82, 0x2d, 0x64, 0x41, 0xf0, 0x74, 0xec, 0x69, 0xd5, 0xc7, 0xeb, 0xae, 0xcf, 0xd0, 0x45,
0x36, 0x7d, 0x91, 0xca, 0x1f, 0xf3, 0x05, 0x83, 0x1a, 0x75, 0x87, 0xcf, 0xf9, 0x71, 0xec, 0x7b,
0xaf, 0xc7, 0xd0, 0x61, 0x3b, 0x11, 0xd1, 0x95, 0xe4, 0xf1, 0x24, 0x8a, 0xb9, 0xe4, 0xa8, 0x99,
0x09, 0x3e, 0xac, 0xf9, 0x9a, 0x1b, 0xd9, 0x87, 0x1d, 0x27, 0xd4, 0xfc, 0xc6, 0xaf, 0xa0, 0x73,
0x4d, 0xe5, 0xad, 0x0c, 0xa5, 0x08, 0xe8, 0x97, 0x84, 0x0a, 0x89, 0x9e, 0xc1, 0xa9, 0x02, 0x96,
0x8c, 0xf4, 0x9c, 0x91, 0x33, 0x6e, 0xcf, 0xce, 0x7e, 0x3e, 0x0c, 0xff, 0xfb, 0xf5, 0x30, 0xac,
0x7f, 0xe0, 0x84, 0x2e, 0x5e, 0x07, 0x75, 0x65, 0x2f, 0x08, 0xfe, 0x06, 0xe7, 0xf9, 0x7f, 0x45,
0xc4, 0x77, 0x82, 0xa2, 0x21, 0xb4, 0xc2, 0x84, 0x30, 0xb9, 0x5c, 0xf1, 0x64, 0x27, 0x75, 0x80,
0x17, 0x80, 0x96, 0xe6, 0x4a, 0x41, 0x4f, 0xa1, 0x9d, 0x44, 0x92, 0xdd, 0xd3, 0x65, 0x1c, 0x4a,
0xc6, 0x7b, 0xee, 0xc8, 0x19, 0x3b, 0x41, 0xcb, 0x68, 0x81, 0x92, 0xf2, 0x0c, 0x43, 0x78, 0x9a,
0x30, 0x19, 0x1a, 0xc0, 0xbf, 0x1d, 0x40, 0xf3, 0x98, 0x86, 0x92, 0xfe, 0xd3, 0xc1, 0xf7, 0x0f,
0xe9, 0x1e, 0x1c, 0x72, 0x02, 0x5d, 0x03, 0x88, 0x64, 0xb5, 0xa2, 0x42, 0x58, 0xd0, 0xd3, 0xe0,
0x85, 0xb6, 0x6e, 0x8d, 0xb3, 0x5f, 0x94, 0x01, 0x4f, 0x34, 0x68, 0x8b, 0x32, 0xc8, 0x0b, 0xb8,
0xb4, 0x48, 0x39, 0xb3, 0xa6, 0x51, 0x64, 0xbc, 0x62, 0x28, 0xfe, 0x1f, 0xba, 0xa5, 0x22, 0x4d,
0x87, 0xf1, 0x3b, 0x40, 0xda, 0x57, 0x35, 0xe5, 0x7d, 0xf7, 0xa1, 0xb1, 0x09, 0x09, 0xbd, 0xdf,
0xb2, 0xd0, 0x36, 0x3d, 0x5b, 0xa3, 0x1e, 0x9c, 0xf2, 0xaf, 0x34, 0xde, 0x86, 0xdf, 0x6d, 0xa9,
0xe9, 0x12, 0x77, 0xe1, 0xa2, 0x98, 0xa5, 0xdb, 0xa8, 0xc4, 0x6b, 0x2a, 0x67, 0xc9, 0x6a, 0x43,
0xb3, 0xde, 0xe2, 0xb7, 0x80, 0x8a, 0xa2, 0xdd, 0xf5, 0x12, 0x6a, 0x92, 0xcb, 0x70, 0x6b, 0xb7,
0x34, 0x0b, 0x34, 0x00, 0x8f, 0x11, 0xd1, 0x73, 0x47, 0xde, 0xb8, 0x3d, 0x83, 0x42, 0xff, 0x95,
0x8c, 0xa7, 0xfa, 0xd6, 0x98, 0xa4, 0x74, 0x72, 0x57, 0xe0, 0x1e, 0x1d, 0x9a, 0xcb, 0x08, 0xfe,
0x54, 0x38, 0x52, 0xb6, 0xf9, 0x5f, 0xfe, 0x84, 0x46, 0x50, 0x53, 0xf3, 0x36, 0x07, 0x69, 0x4d,
0x61, 0xa2, 0xaf, 0xbd, 0x02, 0x02, 0x63, 0xe0, 0xe7, 0x50, 0x37, 0x99, 0x8f, 0x60, 0x27, 0x00,
0x86, 0xbd, 0x61, 0xa2, 0xc0, 0x3b, 0xc7, 0xf8, 0xf7, 0xd0, 0xf9, 0xc8, 0x76, 0x6b, 0x2d, 0x3d,
0xae, 0x4a, 0x35, 0xa7, 0x90, 0x90, 0x98, 0x0a, 0xa1, 0xe7, 0xd4, 0x0c, 0xd2, 0x25, 0xc6, 0x70,
0x9e, 0x87, 0xd9, 0xf2, 0xcf, 0xc0, 0xe5, 0x1b, 0x9d, 0xd6, 0x08, 0x5c, 0xbe, 0x99, 0xfe, 0xf0,
0xa0, 0xb9, 0x48, 0xbf, 0x77, 0xb4, 0x00, 0xc8, 0x27, 0x8b, 0x06, 0x93, 0xfc, 0x69, 0x38, 0x18,
0xb8, 0xff, 0xe4, 0x88, 0x6b, 0x37, 0x5a, 0x00, 0xe4, 0xa3, 0x2f, 0x45, 0x1d, 0x5c, 0x93, 0x52,
0x54, 0xc5, 0x7d, 0x79, 0x03, 0xcd, 0x4c, 0x45, 0xfd, 0x2a, 0x36, 0x0d, 0x1a, 0x54, 0x9b, 0x36,
0x67, 0x0e, 0x8d, 0xb4, 0x1f, 0xc8, 0x2f, 0x90, 0x7b, 0x1d, 0xf7, 0xfb, 0x95, 0x5e, 0x1e, 0x92,
0x3e, 0x5f, 0xa5, 0x90, 0xbd, 0xf7, 0xd0, 0xef, 0x57, 0x7a, 0x36, 0xe4, 0x06, 0x5a, 0x85, 0x8f,
0x14, 0x95, 0x5a, 0x79, 0xf0, 0x42, 0xf9, 0x57, 0xc7, 0x6c, 0x93, 0x36, 0x3b, 0xf9, 0xec, 0x46,
0x77, 0x77, 0x75, 0xfd, 0x34, 0xbf, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x66, 0x18, 0xa5, 0x1e,
0xd0, 0x05, 0x00, 0x00,
}

View File

@ -4,7 +4,8 @@
syntax = "proto3";
option go_package = "pb";
import "overlay.proto";
import "gogo.proto";
import "node.proto";
package inspector;
@ -28,7 +29,7 @@ service Inspector {
// GetStats
message GetStatsRequest {
string node_id = 1;
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
message GetStatsResponse {
@ -39,7 +40,7 @@ message GetStatsResponse {
// CreateStats
message CreateStatsRequest {
string node_id = 1;
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
int64 audit_count = 2;
int64 audit_success_count = 3;
int64 uptime_count = 4;
@ -64,29 +65,29 @@ message GetBucketsRequest {
message GetBucketsResponse {
int64 total = 1;
repeated bytes ids = 2;
repeated bytes ids = 2 [(gogoproto.customtype) = "NodeID"];
}
// GetBucket
message GetBucketRequest {
string id = 1;
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
message GetBucketResponse {
string id = 1;
repeated overlay.Node nodes = 2;
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
repeated node.Node nodes = 2;
}
message Bucket {
repeated overlay.Node nodes = 2;
repeated node.Node nodes = 2;
}
message BucketList {
repeated overlay.Node nodes = 1;
repeated node.Node nodes = 1;
}
// PingNode
message PingNodeRequest {
string id = 1;
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
string address = 2;
}

View File

@ -30,7 +30,7 @@ func (m *SegmentMeta) Reset() { *m = SegmentMeta{} }
func (m *SegmentMeta) String() string { return proto.CompactTextString(m) }
func (*SegmentMeta) ProtoMessage() {}
func (*SegmentMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_7cdcbfdcc9c518d5, []int{0}
return fileDescriptor_meta_f2973588633dae4c, []int{0}
}
func (m *SegmentMeta) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SegmentMeta.Unmarshal(m, b)
@ -78,7 +78,7 @@ func (m *StreamInfo) Reset() { *m = StreamInfo{} }
func (m *StreamInfo) String() string { return proto.CompactTextString(m) }
func (*StreamInfo) ProtoMessage() {}
func (*StreamInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_7cdcbfdcc9c518d5, []int{1}
return fileDescriptor_meta_f2973588633dae4c, []int{1}
}
func (m *StreamInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamInfo.Unmarshal(m, b)
@ -140,7 +140,7 @@ func (m *StreamMeta) Reset() { *m = StreamMeta{} }
func (m *StreamMeta) String() string { return proto.CompactTextString(m) }
func (*StreamMeta) ProtoMessage() {}
func (*StreamMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_7cdcbfdcc9c518d5, []int{2}
return fileDescriptor_meta_f2973588633dae4c, []int{2}
}
func (m *StreamMeta) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamMeta.Unmarshal(m, b)
@ -194,9 +194,9 @@ func init() {
proto.RegisterType((*StreamMeta)(nil), "streams.StreamMeta")
}
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_7cdcbfdcc9c518d5) }
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_f2973588633dae4c) }
var fileDescriptor_meta_7cdcbfdcc9c518d5 = []byte{
var fileDescriptor_meta_f2973588633dae4c = []byte{
// 306 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x51, 0xcd, 0x4e, 0xf3, 0x30,
0x10, 0x54, 0xff, 0xbe, 0xaf, 0x6c, 0x03, 0x05, 0x03, 0x52, 0x04, 0x17, 0x14, 0x0e, 0x20, 0x84,

520
pkg/pb/node.pb.go Normal file
View File

@ -0,0 +1,520 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: node.proto
package pb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// NodeType is an enum of possible node types
type NodeType int32
const (
NodeType_ADMIN NodeType = 0
NodeType_STORAGE NodeType = 1
)
var NodeType_name = map[int32]string{
0: "ADMIN",
1: "STORAGE",
}
var NodeType_value = map[string]int32{
"ADMIN": 0,
"STORAGE": 1,
}
func (x NodeType) String() string {
return proto.EnumName(NodeType_name, int32(x))
}
func (NodeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{0}
}
// NodeTransport is an enum of possible transports for the overlay network
type NodeTransport int32
const (
NodeTransport_TCP_TLS_GRPC NodeTransport = 0
)
var NodeTransport_name = map[int32]string{
0: "TCP_TLS_GRPC",
}
var NodeTransport_value = map[string]int32{
"TCP_TLS_GRPC": 0,
}
func (x NodeTransport) String() string {
return proto.EnumName(NodeTransport_name, int32(x))
}
func (NodeTransport) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{1}
}
// NodeRestrictions contains all relevant data about a nodes ability to store data
type NodeRestrictions struct {
FreeBandwidth int64 `protobuf:"varint,1,opt,name=freeBandwidth,proto3" json:"freeBandwidth,omitempty"`
FreeDisk int64 `protobuf:"varint,2,opt,name=freeDisk,proto3" json:"freeDisk,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeRestrictions) Reset() { *m = NodeRestrictions{} }
func (m *NodeRestrictions) String() string { return proto.CompactTextString(m) }
func (*NodeRestrictions) ProtoMessage() {}
func (*NodeRestrictions) Descriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{0}
}
func (m *NodeRestrictions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRestrictions.Unmarshal(m, b)
}
func (m *NodeRestrictions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeRestrictions.Marshal(b, m, deterministic)
}
func (dst *NodeRestrictions) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeRestrictions.Merge(dst, src)
}
func (m *NodeRestrictions) XXX_Size() int {
return xxx_messageInfo_NodeRestrictions.Size(m)
}
func (m *NodeRestrictions) XXX_DiscardUnknown() {
xxx_messageInfo_NodeRestrictions.DiscardUnknown(m)
}
var xxx_messageInfo_NodeRestrictions proto.InternalMessageInfo
func (m *NodeRestrictions) GetFreeBandwidth() int64 {
if m != nil {
return m.FreeBandwidth
}
return 0
}
func (m *NodeRestrictions) GetFreeDisk() int64 {
if m != nil {
return m.FreeDisk
}
return 0
}
// Node represents a node in the overlay network
// Node is info for a updating a single storagenode, used in the Update rpc calls
type Node struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Address *NodeAddress `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"`
Type NodeType `protobuf:"varint,3,opt,name=type,proto3,enum=node.NodeType" json:"type,omitempty"`
Restrictions *NodeRestrictions `protobuf:"bytes,4,opt,name=restrictions" json:"restrictions,omitempty"`
Metadata *NodeMetadata `protobuf:"bytes,5,opt,name=metadata" json:"metadata,omitempty"`
LatencyList []int64 `protobuf:"varint,6,rep,packed,name=latency_list,json=latencyList" json:"latency_list,omitempty"`
AuditSuccess bool `protobuf:"varint,7,opt,name=audit_success,json=auditSuccess,proto3" json:"audit_success,omitempty"`
IsUp bool `protobuf:"varint,8,opt,name=is_up,json=isUp,proto3" json:"is_up,omitempty"`
UpdateLatency bool `protobuf:"varint,9,opt,name=update_latency,json=updateLatency,proto3" json:"update_latency,omitempty"`
UpdateAuditSuccess bool `protobuf:"varint,10,opt,name=update_audit_success,json=updateAuditSuccess,proto3" json:"update_audit_success,omitempty"`
UpdateUptime bool `protobuf:"varint,11,opt,name=update_uptime,json=updateUptime,proto3" json:"update_uptime,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{1}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Node.Unmarshal(m, b)
}
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Node.Marshal(b, m, deterministic)
}
func (dst *Node) XXX_Merge(src proto.Message) {
xxx_messageInfo_Node.Merge(dst, src)
}
func (m *Node) XXX_Size() int {
return xxx_messageInfo_Node.Size(m)
}
func (m *Node) XXX_DiscardUnknown() {
xxx_messageInfo_Node.DiscardUnknown(m)
}
var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *Node) GetAddress() *NodeAddress {
if m != nil {
return m.Address
}
return nil
}
func (m *Node) GetType() NodeType {
if m != nil {
return m.Type
}
return NodeType_ADMIN
}
func (m *Node) GetRestrictions() *NodeRestrictions {
if m != nil {
return m.Restrictions
}
return nil
}
func (m *Node) GetMetadata() *NodeMetadata {
if m != nil {
return m.Metadata
}
return nil
}
func (m *Node) GetLatencyList() []int64 {
if m != nil {
return m.LatencyList
}
return nil
}
func (m *Node) GetAuditSuccess() bool {
if m != nil {
return m.AuditSuccess
}
return false
}
func (m *Node) GetIsUp() bool {
if m != nil {
return m.IsUp
}
return false
}
func (m *Node) GetUpdateLatency() bool {
if m != nil {
return m.UpdateLatency
}
return false
}
func (m *Node) GetUpdateAuditSuccess() bool {
if m != nil {
return m.UpdateAuditSuccess
}
return false
}
func (m *Node) GetUpdateUptime() bool {
if m != nil {
return m.UpdateUptime
}
return false
}
// NodeAddress contains the information needed to communicate with a node on the network
type NodeAddress struct {
Transport NodeTransport `protobuf:"varint,1,opt,name=transport,proto3,enum=node.NodeTransport" json:"transport,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (m *NodeAddress) String() string { return proto.CompactTextString(m) }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{2}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeAddress.Unmarshal(m, b)
}
func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeAddress.Marshal(b, m, deterministic)
}
func (dst *NodeAddress) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeAddress.Merge(dst, src)
}
func (m *NodeAddress) XXX_Size() int {
return xxx_messageInfo_NodeAddress.Size(m)
}
func (m *NodeAddress) XXX_DiscardUnknown() {
xxx_messageInfo_NodeAddress.DiscardUnknown(m)
}
var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
func (m *NodeAddress) GetTransport() NodeTransport {
if m != nil {
return m.Transport
}
return NodeTransport_TCP_TLS_GRPC
}
func (m *NodeAddress) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
// NodeStats is info about a single storagenode stored in the stats db
type NodeStats struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
Latency_90 int64 `protobuf:"varint,2,opt,name=latency_90,json=latency90,proto3" json:"latency_90,omitempty"`
AuditSuccessRatio float64 `protobuf:"fixed64,3,opt,name=audit_success_ratio,json=auditSuccessRatio,proto3" json:"audit_success_ratio,omitempty"`
UptimeRatio float64 `protobuf:"fixed64,4,opt,name=uptime_ratio,json=uptimeRatio,proto3" json:"uptime_ratio,omitempty"`
AuditCount int64 `protobuf:"varint,5,opt,name=audit_count,json=auditCount,proto3" json:"audit_count,omitempty"`
AuditSuccessCount int64 `protobuf:"varint,6,opt,name=audit_success_count,json=auditSuccessCount,proto3" json:"audit_success_count,omitempty"`
UptimeCount int64 `protobuf:"varint,7,opt,name=uptime_count,json=uptimeCount,proto3" json:"uptime_count,omitempty"`
UptimeSuccessCount int64 `protobuf:"varint,8,opt,name=uptime_success_count,json=uptimeSuccessCount,proto3" json:"uptime_success_count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeStats) Reset() { *m = NodeStats{} }
func (m *NodeStats) String() string { return proto.CompactTextString(m) }
func (*NodeStats) ProtoMessage() {}
func (*NodeStats) Descriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{3}
}
func (m *NodeStats) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeStats.Unmarshal(m, b)
}
func (m *NodeStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeStats.Marshal(b, m, deterministic)
}
func (dst *NodeStats) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeStats.Merge(dst, src)
}
func (m *NodeStats) XXX_Size() int {
return xxx_messageInfo_NodeStats.Size(m)
}
func (m *NodeStats) XXX_DiscardUnknown() {
xxx_messageInfo_NodeStats.DiscardUnknown(m)
}
var xxx_messageInfo_NodeStats proto.InternalMessageInfo
func (m *NodeStats) GetLatency_90() int64 {
if m != nil {
return m.Latency_90
}
return 0
}
func (m *NodeStats) GetAuditSuccessRatio() float64 {
if m != nil {
return m.AuditSuccessRatio
}
return 0
}
func (m *NodeStats) GetUptimeRatio() float64 {
if m != nil {
return m.UptimeRatio
}
return 0
}
func (m *NodeStats) GetAuditCount() int64 {
if m != nil {
return m.AuditCount
}
return 0
}
func (m *NodeStats) GetAuditSuccessCount() int64 {
if m != nil {
return m.AuditSuccessCount
}
return 0
}
func (m *NodeStats) GetUptimeCount() int64 {
if m != nil {
return m.UptimeCount
}
return 0
}
func (m *NodeStats) GetUptimeSuccessCount() int64 {
if m != nil {
return m.UptimeSuccessCount
}
return 0
}
// TODO: combine with `NodeStats`
// NodeRep is the reputation characteristics of a node
type NodeRep struct {
MinUptime float32 `protobuf:"fixed32,1,opt,name=min_uptime,json=minUptime,proto3" json:"min_uptime,omitempty"`
MinAuditSuccess float32 `protobuf:"fixed32,2,opt,name=min_audit_success,json=minAuditSuccess,proto3" json:"min_audit_success,omitempty"`
MinAuditCount int64 `protobuf:"varint,3,opt,name=min_audit_count,json=minAuditCount,proto3" json:"min_audit_count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeRep) Reset() { *m = NodeRep{} }
func (m *NodeRep) String() string { return proto.CompactTextString(m) }
func (*NodeRep) ProtoMessage() {}
func (*NodeRep) Descriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{4}
}
func (m *NodeRep) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRep.Unmarshal(m, b)
}
func (m *NodeRep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeRep.Marshal(b, m, deterministic)
}
func (dst *NodeRep) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeRep.Merge(dst, src)
}
func (m *NodeRep) XXX_Size() int {
return xxx_messageInfo_NodeRep.Size(m)
}
func (m *NodeRep) XXX_DiscardUnknown() {
xxx_messageInfo_NodeRep.DiscardUnknown(m)
}
var xxx_messageInfo_NodeRep proto.InternalMessageInfo
func (m *NodeRep) GetMinUptime() float32 {
if m != nil {
return m.MinUptime
}
return 0
}
func (m *NodeRep) GetMinAuditSuccess() float32 {
if m != nil {
return m.MinAuditSuccess
}
return 0
}
func (m *NodeRep) GetMinAuditCount() int64 {
if m != nil {
return m.MinAuditCount
}
return 0
}
type NodeMetadata struct {
Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"`
Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeMetadata) Reset() { *m = NodeMetadata{} }
func (m *NodeMetadata) String() string { return proto.CompactTextString(m) }
func (*NodeMetadata) ProtoMessage() {}
func (*NodeMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_node_2ea4a85792199846, []int{5}
}
func (m *NodeMetadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeMetadata.Unmarshal(m, b)
}
func (m *NodeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeMetadata.Marshal(b, m, deterministic)
}
func (dst *NodeMetadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeMetadata.Merge(dst, src)
}
func (m *NodeMetadata) XXX_Size() int {
return xxx_messageInfo_NodeMetadata.Size(m)
}
func (m *NodeMetadata) XXX_DiscardUnknown() {
xxx_messageInfo_NodeMetadata.DiscardUnknown(m)
}
var xxx_messageInfo_NodeMetadata proto.InternalMessageInfo
func (m *NodeMetadata) GetEmail() string {
if m != nil {
return m.Email
}
return ""
}
func (m *NodeMetadata) GetWallet() string {
if m != nil {
return m.Wallet
}
return ""
}
func init() {
proto.RegisterType((*NodeRestrictions)(nil), "node.NodeRestrictions")
proto.RegisterType((*Node)(nil), "node.Node")
proto.RegisterType((*NodeAddress)(nil), "node.NodeAddress")
proto.RegisterType((*NodeStats)(nil), "node.NodeStats")
proto.RegisterType((*NodeRep)(nil), "node.NodeRep")
proto.RegisterType((*NodeMetadata)(nil), "node.NodeMetadata")
proto.RegisterEnum("node.NodeType", NodeType_name, NodeType_value)
proto.RegisterEnum("node.NodeTransport", NodeTransport_name, NodeTransport_value)
}
func init() { proto.RegisterFile("node.proto", fileDescriptor_node_2ea4a85792199846) }
var fileDescriptor_node_2ea4a85792199846 = []byte{
// 646 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x94, 0x4f, 0x4f, 0xdb, 0x4c,
0x10, 0xc6, 0x89, 0xed, 0xfc, 0xf1, 0x38, 0x09, 0x61, 0x40, 0xc8, 0x42, 0x7a, 0x5f, 0x82, 0x79,
0xdf, 0x36, 0xa2, 0x52, 0x44, 0xe9, 0x89, 0xaa, 0x97, 0x00, 0x15, 0x42, 0x02, 0x8a, 0x36, 0xe1,
0xc2, 0xc5, 0x5a, 0xe2, 0x2d, 0x5d, 0x35, 0xb1, 0x2d, 0x7b, 0x23, 0x84, 0xd4, 0xcf, 0xd6, 0x73,
0x0f, 0xfd, 0x04, 0x3d, 0xf0, 0x59, 0xaa, 0x9d, 0x75, 0x88, 0xad, 0xaa, 0x37, 0xef, 0xf3, 0xfc,
0x76, 0x66, 0x77, 0x66, 0xbc, 0x00, 0x71, 0x12, 0x89, 0x61, 0x9a, 0x25, 0x2a, 0x41, 0x47, 0x7f,
0xef, 0xc0, 0x43, 0xf2, 0x90, 0x18, 0x25, 0x98, 0x40, 0xef, 0x3a, 0x89, 0x04, 0x13, 0xb9, 0xca,
0xe4, 0x54, 0xc9, 0x24, 0xce, 0xf1, 0x3f, 0xe8, 0x7c, 0xce, 0x84, 0x38, 0xe1, 0x71, 0xf4, 0x28,
0x23, 0xf5, 0xc5, 0xaf, 0xf5, 0x6b, 0x03, 0x9b, 0x55, 0x45, 0xdc, 0x81, 0x96, 0x16, 0xce, 0x64,
0xfe, 0xd5, 0xb7, 0x08, 0x78, 0x59, 0x07, 0xdf, 0x6d, 0x70, 0x74, 0x58, 0xfc, 0x17, 0x2c, 0x19,
0xd1, 0xfe, 0xf6, 0x49, 0xf7, 0xc7, 0xf3, 0xee, 0xda, 0xaf, 0xe7, 0xdd, 0x86, 0x76, 0x2e, 0xce,
0x98, 0x25, 0x23, 0x7c, 0x03, 0x4d, 0x1e, 0x45, 0x99, 0xc8, 0x73, 0x8a, 0xe1, 0x1d, 0x6d, 0x0c,
0xe9, 0xb8, 0x1a, 0x19, 0x19, 0x83, 0x2d, 0x09, 0x0c, 0xc0, 0x51, 0x4f, 0xa9, 0xf0, 0xed, 0x7e,
0x6d, 0xd0, 0x3d, 0xea, 0xae, 0xc8, 0xc9, 0x53, 0x2a, 0x18, 0x79, 0xf8, 0x1e, 0xda, 0x59, 0xe9,
0x2e, 0xbe, 0x43, 0x51, 0xb7, 0x57, 0x6c, 0xf9, 0xa6, 0xac, 0xc2, 0xe2, 0x10, 0x5a, 0x73, 0xa1,
0x78, 0xc4, 0x15, 0xf7, 0xeb, 0xb4, 0x0f, 0x57, 0xfb, 0xae, 0x0a, 0x87, 0xbd, 0x30, 0xb8, 0x07,
0xed, 0x19, 0x57, 0x22, 0x9e, 0x3e, 0x85, 0x33, 0x99, 0x2b, 0xbf, 0xd1, 0xb7, 0x07, 0x36, 0xf3,
0x0a, 0xed, 0x52, 0xe6, 0x0a, 0xf7, 0xa1, 0xc3, 0x17, 0x91, 0x54, 0x61, 0xbe, 0x98, 0x4e, 0xf5,
0x2d, 0x9b, 0xfd, 0xda, 0xa0, 0xc5, 0xda, 0x24, 0x8e, 0x8d, 0x86, 0x9b, 0x50, 0x97, 0x79, 0xb8,
0x48, 0xfd, 0x16, 0x99, 0x8e, 0xcc, 0x6f, 0x53, 0xfc, 0x1f, 0xba, 0x8b, 0x34, 0xe2, 0x4a, 0x84,
0x45, 0x3c, 0xdf, 0x25, 0xb7, 0x63, 0xd4, 0x4b, 0x23, 0xe2, 0x21, 0x6c, 0x15, 0x58, 0x35, 0x0f,
0x10, 0x8c, 0xc6, 0x1b, 0x95, 0xb3, 0xed, 0x43, 0x11, 0x22, 0x5c, 0xa4, 0x4a, 0xce, 0x85, 0xef,
0x99, 0x23, 0x19, 0xf1, 0x96, 0xb4, 0xe0, 0x0e, 0xbc, 0x52, 0x0b, 0xf0, 0x2d, 0xb8, 0x2a, 0xe3,
0x71, 0x9e, 0x26, 0x99, 0xa2, 0x6e, 0x76, 0x8f, 0x36, 0x4b, 0xe5, 0x5f, 0x5a, 0x6c, 0x45, 0xa1,
0x5f, 0xed, 0xac, 0xfb, 0xd2, 0xc6, 0xe0, 0xa7, 0x05, 0xae, 0xde, 0x36, 0x56, 0x5c, 0xe5, 0xf8,
0x1a, 0x9a, 0x3a, 0x50, 0xf8, 0xd7, 0x31, 0x69, 0x68, 0xfb, 0x22, 0xc2, 0x7f, 0x00, 0x96, 0xd5,
0x3e, 0x3e, 0x2c, 0x26, 0xce, 0x2d, 0x94, 0xe3, 0x43, 0x1c, 0xc2, 0x66, 0xa5, 0x02, 0x61, 0xc6,
0x95, 0x4c, 0x68, 0x56, 0x6a, 0x6c, 0xa3, 0x5c, 0x6f, 0xa6, 0x0d, 0xdd, 0x3c, 0x73, 0xff, 0x02,
0x74, 0x08, 0xf4, 0x8c, 0x66, 0x90, 0x5d, 0xf0, 0x4c, 0xc8, 0x69, 0xb2, 0x88, 0x15, 0x8d, 0x84,
0xcd, 0x80, 0xa4, 0x53, 0xad, 0xfc, 0x99, 0xd3, 0x80, 0x0d, 0x02, 0x2b, 0x39, 0x0d, 0xbf, 0xca,
0x69, 0xc0, 0x26, 0x81, 0x45, 0x4e, 0x83, 0x50, 0x3f, 0x09, 0xa9, 0xc6, 0x6c, 0x11, 0x8a, 0xc6,
0x2b, 0x07, 0x0d, 0xbe, 0x41, 0xd3, 0xcc, 0x75, 0xaa, 0x4b, 0x34, 0x97, 0xf1, 0xb2, 0xaf, 0xba,
0x9c, 0x16, 0x73, 0xe7, 0x32, 0x36, 0x4d, 0xc5, 0x03, 0xd8, 0xd0, 0x76, 0x75, 0x50, 0x2c, 0xa2,
0xd6, 0xe7, 0x32, 0xae, 0x4c, 0xc9, 0x2b, 0x58, 0x5f, 0xb1, 0xe6, 0x08, 0xb6, 0x79, 0x05, 0x96,
0xa4, 0xc9, 0xfe, 0x01, 0xda, 0xe5, 0xbf, 0x03, 0xb7, 0xa0, 0x2e, 0xe6, 0x5c, 0xce, 0x28, 0xbb,
0xcb, 0xcc, 0x02, 0xb7, 0xa1, 0xf1, 0xc8, 0x67, 0x33, 0xa1, 0x8a, 0x59, 0x28, 0x56, 0x07, 0x01,
0xb4, 0x96, 0xff, 0x2f, 0xba, 0x50, 0x1f, 0x9d, 0x5d, 0x5d, 0x5c, 0xf7, 0xd6, 0xd0, 0x83, 0xe6,
0x78, 0xf2, 0x89, 0x8d, 0xce, 0x3f, 0xf6, 0x6a, 0x07, 0x7b, 0xd0, 0xa9, 0x0c, 0x19, 0xf6, 0xa0,
0x3d, 0x39, 0xbd, 0x09, 0x27, 0x97, 0xe3, 0xf0, 0x9c, 0xdd, 0x9c, 0xf6, 0xd6, 0x4e, 0x9c, 0x3b,
0x2b, 0xbd, 0xbf, 0x6f, 0xd0, 0x8b, 0xf6, 0xee, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x4d,
0xc8, 0xfa, 0xf1, 0x04, 0x00, 0x00,
}

74
pkg/pb/node.proto Normal file
View File

@ -0,0 +1,74 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "pb";
package node;
import "gogo.proto";
// NodeRestrictions contains all relevant data about a nodes ability to store data
message NodeRestrictions {
int64 freeBandwidth = 1;
int64 freeDisk = 2;
}
// Node represents a node in the overlay network
// Node is info for a updating a single storagenode, used in the Update rpc calls
message Node {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
NodeAddress address = 2;
NodeType type = 3;
NodeRestrictions restrictions = 4;
NodeMetadata metadata = 5;
repeated int64 latency_list = 6;
bool audit_success = 7;
bool is_up = 8;
bool update_latency = 9;
bool update_audit_success = 10;
bool update_uptime = 11;
}
// NodeType is an enum of possible node types
enum NodeType {
ADMIN = 0;
STORAGE = 1;
}
// NodeAddress contains the information needed to communicate with a node on the network
message NodeAddress {
NodeTransport transport = 1;
string address = 2;
}
// NodeTransport is an enum of possible transports for the overlay network
enum NodeTransport {
TCP_TLS_GRPC = 0;
}
// NodeStats is info about a single storagenode stored in the stats db
message NodeStats {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
int64 latency_90 = 2; // 90th percentile measure of storagenode latency
double audit_success_ratio = 3; // (auditSuccessCount / totalAuditCount)
double uptime_ratio = 4; // (uptimeCount / totalUptimeCheckCount)
int64 audit_count = 5;
int64 audit_success_count = 6;
int64 uptime_count = 7;
int64 uptime_success_count = 8;
}
// TODO: combine with `NodeStats`
// NodeRep is the reputation characteristics of a node
message NodeRep {
float min_uptime = 1;
float min_audit_success = 2;
int64 min_audit_count = 3;
}
message NodeMetadata {
string email = 1;
string wallet = 2;
}

View File

@ -6,6 +6,7 @@ package pb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import duration "github.com/golang/protobuf/ptypes/duration"
import context "golang.org/x/net/context"
@ -22,51 +23,6 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// NodeTransport is an enum of possible transports for the overlay network
type NodeTransport int32
const (
NodeTransport_TCP_TLS_GRPC NodeTransport = 0
)
var NodeTransport_name = map[int32]string{
0: "TCP_TLS_GRPC",
}
var NodeTransport_value = map[string]int32{
"TCP_TLS_GRPC": 0,
}
func (x NodeTransport) String() string {
return proto.EnumName(NodeTransport_name, int32(x))
}
func (NodeTransport) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{0}
}
// NodeType is an enum of possible node types
type NodeType int32
const (
NodeType_ADMIN NodeType = 0
NodeType_STORAGE NodeType = 1
)
var NodeType_name = map[int32]string{
0: "ADMIN",
1: "STORAGE",
}
var NodeType_value = map[string]int32{
"ADMIN": 0,
"STORAGE": 1,
}
func (x NodeType) String() string {
return proto.EnumName(NodeType_name, int32(x))
}
func (NodeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{1}
}
type Restriction_Operator int32
const (
@ -96,7 +52,7 @@ func (x Restriction_Operator) String() string {
return proto.EnumName(Restriction_Operator_name, int32(x))
}
func (Restriction_Operator) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{16, 0}
return fileDescriptor_overlay_366705336bc11433, []int{11, 0}
}
type Restriction_Operand int32
@ -119,12 +75,12 @@ func (x Restriction_Operand) String() string {
return proto.EnumName(Restriction_Operand_name, int32(x))
}
func (Restriction_Operand) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{16, 1}
return fileDescriptor_overlay_366705336bc11433, []int{11, 1}
}
// LookupRequest is is request message for the lookup rpc call
type LookupRequest struct {
NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -134,7 +90,7 @@ func (m *LookupRequest) Reset() { *m = LookupRequest{} }
func (m *LookupRequest) String() string { return proto.CompactTextString(m) }
func (*LookupRequest) ProtoMessage() {}
func (*LookupRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{0}
return fileDescriptor_overlay_366705336bc11433, []int{0}
}
func (m *LookupRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupRequest.Unmarshal(m, b)
@ -154,13 +110,6 @@ func (m *LookupRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_LookupRequest proto.InternalMessageInfo
func (m *LookupRequest) GetNodeID() string {
if m != nil {
return m.NodeID
}
return ""
}
// LookupResponse is is response message for the lookup rpc call
type LookupResponse struct {
Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"`
@ -173,7 +122,7 @@ func (m *LookupResponse) Reset() { *m = LookupResponse{} }
func (m *LookupResponse) String() string { return proto.CompactTextString(m) }
func (*LookupResponse) ProtoMessage() {}
func (*LookupResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{1}
return fileDescriptor_overlay_366705336bc11433, []int{1}
}
func (m *LookupResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupResponse.Unmarshal(m, b)
@ -212,7 +161,7 @@ func (m *LookupRequests) Reset() { *m = LookupRequests{} }
func (m *LookupRequests) String() string { return proto.CompactTextString(m) }
func (*LookupRequests) ProtoMessage() {}
func (*LookupRequests) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{2}
return fileDescriptor_overlay_366705336bc11433, []int{2}
}
func (m *LookupRequests) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupRequests.Unmarshal(m, b)
@ -251,7 +200,7 @@ func (m *LookupResponses) Reset() { *m = LookupResponses{} }
func (m *LookupResponses) String() string { return proto.CompactTextString(m) }
func (*LookupResponses) ProtoMessage() {}
func (*LookupResponses) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{3}
return fileDescriptor_overlay_366705336bc11433, []int{3}
}
func (m *LookupResponses) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupResponses.Unmarshal(m, b)
@ -290,7 +239,7 @@ func (m *FindStorageNodesResponse) Reset() { *m = FindStorageNodesRespon
func (m *FindStorageNodesResponse) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesResponse) ProtoMessage() {}
func (*FindStorageNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{4}
return fileDescriptor_overlay_366705336bc11433, []int{4}
}
func (m *FindStorageNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindStorageNodesResponse.Unmarshal(m, b)
@ -322,7 +271,7 @@ type FindStorageNodesRequest struct {
ObjectSize int64 `protobuf:"varint,1,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"`
ContractLength *duration.Duration `protobuf:"bytes,2,opt,name=contract_length,json=contractLength" json:"contract_length,omitempty"`
Opts *OverlayOptions `protobuf:"bytes,3,opt,name=opts" json:"opts,omitempty"`
Start []byte `protobuf:"bytes,4,opt,name=start,proto3" json:"start,omitempty"`
Start NodeID `protobuf:"bytes,4,opt,name=start,proto3,customtype=NodeID" json:"start"`
MaxNodes int64 `protobuf:"varint,5,opt,name=max_nodes,json=maxNodes,proto3" json:"max_nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -333,7 +282,7 @@ func (m *FindStorageNodesRequest) Reset() { *m = FindStorageNodesRequest
func (m *FindStorageNodesRequest) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesRequest) ProtoMessage() {}
func (*FindStorageNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{5}
return fileDescriptor_overlay_366705336bc11433, []int{5}
}
func (m *FindStorageNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindStorageNodesRequest.Unmarshal(m, b)
@ -374,13 +323,6 @@ func (m *FindStorageNodesRequest) GetOpts() *OverlayOptions {
return nil
}
func (m *FindStorageNodesRequest) GetStart() []byte {
if m != nil {
return m.Start
}
return nil
}
func (m *FindStorageNodesRequest) GetMaxNodes() int64 {
if m != nil {
return m.MaxNodes
@ -388,53 +330,6 @@ func (m *FindStorageNodesRequest) GetMaxNodes() int64 {
return 0
}
// NodeAddress contains the information needed to communicate with a node on the network
type NodeAddress struct {
Transport NodeTransport `protobuf:"varint,1,opt,name=transport,proto3,enum=overlay.NodeTransport" json:"transport,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (m *NodeAddress) String() string { return proto.CompactTextString(m) }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{6}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeAddress.Unmarshal(m, b)
}
func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeAddress.Marshal(b, m, deterministic)
}
func (dst *NodeAddress) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeAddress.Merge(dst, src)
}
func (m *NodeAddress) XXX_Size() int {
return xxx_messageInfo_NodeAddress.Size(m)
}
func (m *NodeAddress) XXX_DiscardUnknown() {
xxx_messageInfo_NodeAddress.DiscardUnknown(m)
}
var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
func (m *NodeAddress) GetTransport() NodeTransport {
if m != nil {
return m.Transport
}
return NodeTransport_TCP_TLS_GRPC
}
func (m *NodeAddress) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
// OverlayOptions is a set of criteria that a node must meet to be considered for a storage opportunity
type OverlayOptions struct {
MaxLatency *duration.Duration `protobuf:"bytes,1,opt,name=max_latency,json=maxLatency" json:"max_latency,omitempty"`
@ -442,7 +337,7 @@ type OverlayOptions struct {
MinSpeedKbps int64 `protobuf:"varint,3,opt,name=min_speed_kbps,json=minSpeedKbps,proto3" json:"min_speed_kbps,omitempty"`
Amount int64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"`
Restrictions *NodeRestrictions `protobuf:"bytes,5,opt,name=restrictions" json:"restrictions,omitempty"`
ExcludedNodes []string `protobuf:"bytes,6,rep,name=excluded_nodes,json=excludedNodes" json:"excluded_nodes,omitempty"`
ExcludedNodes []NodeID `protobuf:"bytes,6,rep,name=excluded_nodes,json=excludedNodes,customtype=NodeID" json:"excluded_nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -452,7 +347,7 @@ func (m *OverlayOptions) Reset() { *m = OverlayOptions{} }
func (m *OverlayOptions) String() string { return proto.CompactTextString(m) }
func (*OverlayOptions) ProtoMessage() {}
func (*OverlayOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{7}
return fileDescriptor_overlay_366705336bc11433, []int{6}
}
func (m *OverlayOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OverlayOptions.Unmarshal(m, b)
@ -507,240 +402,6 @@ func (m *OverlayOptions) GetRestrictions() *NodeRestrictions {
return nil
}
func (m *OverlayOptions) GetExcludedNodes() []string {
if m != nil {
return m.ExcludedNodes
}
return nil
}
// NodeRep is the reputation characteristics of a node
type NodeRep struct {
UptimeRatio float64 `protobuf:"fixed64,1,opt,name=uptimeRatio,proto3" json:"uptimeRatio,omitempty"`
AuditSuccessRatio float64 `protobuf:"fixed64,2,opt,name=auditSuccessRatio,proto3" json:"auditSuccessRatio,omitempty"`
AuditCount int64 `protobuf:"varint,3,opt,name=auditCount,proto3" json:"auditCount,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeRep) Reset() { *m = NodeRep{} }
func (m *NodeRep) String() string { return proto.CompactTextString(m) }
func (*NodeRep) ProtoMessage() {}
func (*NodeRep) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{8}
}
func (m *NodeRep) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRep.Unmarshal(m, b)
}
func (m *NodeRep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeRep.Marshal(b, m, deterministic)
}
func (dst *NodeRep) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeRep.Merge(dst, src)
}
func (m *NodeRep) XXX_Size() int {
return xxx_messageInfo_NodeRep.Size(m)
}
func (m *NodeRep) XXX_DiscardUnknown() {
xxx_messageInfo_NodeRep.DiscardUnknown(m)
}
var xxx_messageInfo_NodeRep proto.InternalMessageInfo
func (m *NodeRep) GetUptimeRatio() float64 {
if m != nil {
return m.UptimeRatio
}
return 0
}
func (m *NodeRep) GetAuditSuccessRatio() float64 {
if m != nil {
return m.AuditSuccessRatio
}
return 0
}
func (m *NodeRep) GetAuditCount() int64 {
if m != nil {
return m.AuditCount
}
return 0
}
// NodeRestrictions contains all relevant data about a nodes ability to store data
type NodeRestrictions struct {
FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"`
FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeRestrictions) Reset() { *m = NodeRestrictions{} }
func (m *NodeRestrictions) String() string { return proto.CompactTextString(m) }
func (*NodeRestrictions) ProtoMessage() {}
func (*NodeRestrictions) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{9}
}
func (m *NodeRestrictions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeRestrictions.Unmarshal(m, b)
}
func (m *NodeRestrictions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeRestrictions.Marshal(b, m, deterministic)
}
func (dst *NodeRestrictions) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeRestrictions.Merge(dst, src)
}
func (m *NodeRestrictions) XXX_Size() int {
return xxx_messageInfo_NodeRestrictions.Size(m)
}
func (m *NodeRestrictions) XXX_DiscardUnknown() {
xxx_messageInfo_NodeRestrictions.DiscardUnknown(m)
}
var xxx_messageInfo_NodeRestrictions proto.InternalMessageInfo
// GetFreeBandwidth returns the node's free bandwidth, or 0 for a nil receiver.
func (m *NodeRestrictions) GetFreeBandwidth() int64 {
	if m == nil {
		return 0
	}
	return m.FreeBandwidth
}

// GetFreeDisk returns the node's free disk space, or 0 for a nil receiver.
func (m *NodeRestrictions) GetFreeDisk() int64 {
	if m == nil {
		return 0
	}
	return m.FreeDisk
}
// Node represents a node in the overlay network
type Node struct {
	// Id is the node's identifier string.
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	// Address describes how to reach the node (transport + address).
	Address *NodeAddress `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"`
	// Type is the node's role in the network (see the NodeType enum).
	Type NodeType `protobuf:"varint,3,opt,name=type,proto3,enum=overlay.NodeType" json:"type,omitempty"`
	// Restrictions captures the node's storage/bandwidth limits.
	Restrictions *NodeRestrictions `protobuf:"bytes,4,opt,name=restrictions" json:"restrictions,omitempty"`
	// Metadata carries the node's email and wallet strings.
	Metadata *NodeMetadata `protobuf:"bytes,5,opt,name=metadata" json:"metadata,omitempty"`
	// Reputation holds the node's uptime/audit reputation figures.
	Reputation           *NodeRep `protobuf:"bytes,6,opt,name=reputation" json:"reputation,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message back to its zero value.
func (m *Node) Reset() { *m = Node{} }

// String renders the message in compact protobuf text format.
func (m *Node) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*Node) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within that file descriptor.
func (*Node) Descriptor() ([]byte, []int) {
	return fileDescriptor_overlay_94858445011884fe, []int{10}
}

// XXX_Unmarshal decodes b into the message via the cached runtime metadata.
func (m *Node) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Node.Unmarshal(m, b)
}

// XXX_Marshal appends the encoded message to b.
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Node.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst.
func (dst *Node) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Node.Merge(dst, src)
}

// XXX_Size reports the encoded wire size of the message.
func (m *Node) XXX_Size() int {
	return xxx_messageInfo_Node.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields buffered on the message.
func (m *Node) XXX_DiscardUnknown() {
	xxx_messageInfo_Node.DiscardUnknown(m)
}

// xxx_messageInfo_Node caches runtime metadata for Node.
var xxx_messageInfo_Node proto.InternalMessageInfo
// GetId returns the node id, or the empty string for a nil receiver.
func (m *Node) GetId() string {
	if m == nil {
		return ""
	}
	return m.Id
}

// GetAddress returns the node's address message, or nil for a nil receiver.
func (m *Node) GetAddress() *NodeAddress {
	if m == nil {
		return nil
	}
	return m.Address
}

// GetType returns the node type; a nil receiver yields the zero
// enum value, NodeType_ADMIN.
func (m *Node) GetType() NodeType {
	if m == nil {
		return NodeType_ADMIN
	}
	return m.Type
}

// GetRestrictions returns the node's restrictions, or nil for a nil receiver.
func (m *Node) GetRestrictions() *NodeRestrictions {
	if m == nil {
		return nil
	}
	return m.Restrictions
}

// GetMetadata returns the node's metadata, or nil for a nil receiver.
func (m *Node) GetMetadata() *NodeMetadata {
	if m == nil {
		return nil
	}
	return m.Metadata
}

// GetReputation returns the node's reputation, or nil for a nil receiver.
func (m *Node) GetReputation() *NodeRep {
	if m == nil {
		return nil
	}
	return m.Reputation
}
// NodeMetadata carries a node's email and wallet strings.
type NodeMetadata struct {
	// Email is the node operator's contact email (proto field email).
	Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"`
	// Wallet is the node operator's wallet string (proto field wallet).
	Wallet               string   `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message back to its zero value.
func (m *NodeMetadata) Reset() { *m = NodeMetadata{} }

// String renders the message in compact protobuf text format.
func (m *NodeMetadata) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*NodeMetadata) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within that file descriptor.
func (*NodeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_overlay_94858445011884fe, []int{11}
}

// XXX_Unmarshal decodes b into the message via the cached runtime metadata.
func (m *NodeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_NodeMetadata.Unmarshal(m, b)
}

// XXX_Marshal appends the encoded message to b.
func (m *NodeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_NodeMetadata.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst.
func (dst *NodeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NodeMetadata.Merge(dst, src)
}

// XXX_Size reports the encoded wire size of the message.
func (m *NodeMetadata) XXX_Size() int {
	return xxx_messageInfo_NodeMetadata.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields buffered on the message.
func (m *NodeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_NodeMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_NodeMetadata caches runtime metadata for NodeMetadata.
var xxx_messageInfo_NodeMetadata proto.InternalMessageInfo
// GetEmail returns the metadata's email, or the empty string for a nil receiver.
func (m *NodeMetadata) GetEmail() string {
	if m == nil {
		return ""
	}
	return m.Email
}

// GetWallet returns the metadata's wallet, or the empty string for a nil receiver.
func (m *NodeMetadata) GetWallet() string {
	if m == nil {
		return ""
	}
	return m.Wallet
}
type QueryRequest struct {
Sender *Node `protobuf:"bytes,1,opt,name=sender" json:"sender,omitempty"`
Target *Node `protobuf:"bytes,2,opt,name=target" json:"target,omitempty"`
@ -755,7 +416,7 @@ func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{12}
return fileDescriptor_overlay_366705336bc11433, []int{7}
}
func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryRequest.Unmarshal(m, b)
@ -815,7 +476,7 @@ func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (m *QueryResponse) String() string { return proto.CompactTextString(m) }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{13}
return fileDescriptor_overlay_366705336bc11433, []int{8}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryResponse.Unmarshal(m, b)
@ -859,7 +520,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} }
func (m *PingRequest) String() string { return proto.CompactTextString(m) }
func (*PingRequest) ProtoMessage() {}
func (*PingRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{14}
return fileDescriptor_overlay_366705336bc11433, []int{9}
}
func (m *PingRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingRequest.Unmarshal(m, b)
@ -889,7 +550,7 @@ func (m *PingResponse) Reset() { *m = PingResponse{} }
func (m *PingResponse) String() string { return proto.CompactTextString(m) }
func (*PingResponse) ProtoMessage() {}
func (*PingResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{15}
return fileDescriptor_overlay_366705336bc11433, []int{10}
}
func (m *PingResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingResponse.Unmarshal(m, b)
@ -922,7 +583,7 @@ func (m *Restriction) Reset() { *m = Restriction{} }
func (m *Restriction) String() string { return proto.CompactTextString(m) }
func (*Restriction) ProtoMessage() {}
func (*Restriction) Descriptor() ([]byte, []int) {
return fileDescriptor_overlay_94858445011884fe, []int{16}
return fileDescriptor_overlay_366705336bc11433, []int{11}
}
func (m *Restriction) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Restriction.Unmarshal(m, b)
@ -970,19 +631,12 @@ func init() {
proto.RegisterType((*LookupResponses)(nil), "overlay.LookupResponses")
proto.RegisterType((*FindStorageNodesResponse)(nil), "overlay.FindStorageNodesResponse")
proto.RegisterType((*FindStorageNodesRequest)(nil), "overlay.FindStorageNodesRequest")
proto.RegisterType((*NodeAddress)(nil), "overlay.NodeAddress")
proto.RegisterType((*OverlayOptions)(nil), "overlay.OverlayOptions")
proto.RegisterType((*NodeRep)(nil), "overlay.NodeRep")
proto.RegisterType((*NodeRestrictions)(nil), "overlay.NodeRestrictions")
proto.RegisterType((*Node)(nil), "overlay.Node")
proto.RegisterType((*NodeMetadata)(nil), "overlay.NodeMetadata")
proto.RegisterType((*QueryRequest)(nil), "overlay.QueryRequest")
proto.RegisterType((*QueryResponse)(nil), "overlay.QueryResponse")
proto.RegisterType((*PingRequest)(nil), "overlay.PingRequest")
proto.RegisterType((*PingResponse)(nil), "overlay.PingResponse")
proto.RegisterType((*Restriction)(nil), "overlay.Restriction")
proto.RegisterEnum("overlay.NodeTransport", NodeTransport_name, NodeTransport_value)
proto.RegisterEnum("overlay.NodeType", NodeType_name, NodeType_value)
proto.RegisterEnum("overlay.Restriction_Operator", Restriction_Operator_name, Restriction_Operator_value)
proto.RegisterEnum("overlay.Restriction_Operand", Restriction_Operand_name, Restriction_Operand_value)
}
@ -1228,77 +882,61 @@ var _Nodes_serviceDesc = grpc.ServiceDesc{
Metadata: "overlay.proto",
}
func init() { proto.RegisterFile("overlay.proto", fileDescriptor_overlay_94858445011884fe) }
func init() { proto.RegisterFile("overlay.proto", fileDescriptor_overlay_366705336bc11433) }
var fileDescriptor_overlay_94858445011884fe = []byte{
// 1100 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdb, 0x6e, 0xdb, 0x46,
0x13, 0x36, 0x75, 0xd6, 0x48, 0x62, 0x98, 0x45, 0x12, 0xeb, 0xd7, 0xdf, 0xa6, 0x32, 0x5b, 0xa3,
0x6e, 0x1a, 0x28, 0xad, 0x1c, 0xa4, 0x48, 0xd1, 0x20, 0xb5, 0x2d, 0xc5, 0x35, 0xa2, 0xd8, 0xce,
0x4a, 0x68, 0x80, 0x02, 0x85, 0xb0, 0x12, 0x37, 0x0a, 0x2b, 0x8a, 0x64, 0xb9, 0xcb, 0xc4, 0xca,
0x43, 0x14, 0xe8, 0x63, 0xf4, 0xa5, 0xf2, 0x20, 0xbd, 0x2a, 0xf6, 0x40, 0x4a, 0xb4, 0xad, 0xa0,
0xbd, 0xe2, 0xce, 0x37, 0xdf, 0x0c, 0xe7, 0xb4, 0xb3, 0xd0, 0x08, 0xde, 0xd2, 0xc8, 0x23, 0xcb,
0x4e, 0x18, 0x05, 0x3c, 0x40, 0x65, 0x2d, 0xb6, 0xee, 0xce, 0x82, 0x60, 0xe6, 0xd1, 0x07, 0x12,
0x9e, 0xc4, 0xaf, 0x1f, 0x38, 0x71, 0x44, 0xb8, 0x1b, 0xf8, 0x8a, 0x68, 0x7f, 0x09, 0x8d, 0x41,
0x10, 0xcc, 0xe3, 0x10, 0xd3, 0xdf, 0x63, 0xca, 0x38, 0xba, 0x03, 0x25, 0x3f, 0x70, 0xe8, 0x49,
0xaf, 0x69, 0xb4, 0x8d, 0xbd, 0x2a, 0xd6, 0x92, 0xbd, 0x0f, 0x66, 0x42, 0x64, 0x61, 0xe0, 0x33,
0x8a, 0x76, 0xa0, 0x20, 0x74, 0x92, 0x57, 0xeb, 0x36, 0x3a, 0x49, 0x04, 0xa7, 0x81, 0x43, 0xb1,
0x54, 0xd9, 0x67, 0x2b, 0x23, 0xe9, 0x9d, 0xa1, 0x27, 0x60, 0x7a, 0x12, 0x19, 0x47, 0x0a, 0x6a,
0x1a, 0xed, 0xfc, 0x5e, 0xad, 0x7b, 0x27, 0x35, 0xcf, 0x18, 0xe0, 0x86, 0xb7, 0x2e, 0xda, 0x43,
0xb8, 0x91, 0x8d, 0x82, 0xa1, 0x1f, 0xe1, 0x46, 0xea, 0x51, 0x61, 0xda, 0xe5, 0xf6, 0x15, 0x97,
0x4a, 0x8d, 0x4d, 0x2f, 0x23, 0xdb, 0x4f, 0xa1, 0xf9, 0xcc, 0xf5, 0x9d, 0x21, 0x0f, 0x22, 0x32,
0xa3, 0x22, 0x7c, 0x96, 0x26, 0xf9, 0x39, 0x14, 0x45, 0x26, 0x4c, 0xfb, 0xbc, 0x94, 0xa5, 0xd2,
0xd9, 0x1f, 0x0c, 0xd8, 0xbe, 0xea, 0x41, 0xd5, 0xf3, 0x33, 0xa8, 0x05, 0x93, 0xdf, 0xe8, 0x94,
0x8f, 0x99, 0xfb, 0x5e, 0x15, 0x2b, 0x8f, 0x41, 0x41, 0x43, 0xf7, 0x3d, 0x45, 0x87, 0x70, 0x63,
0x1a, 0xf8, 0x3c, 0x22, 0x53, 0x3e, 0xf6, 0xa8, 0x3f, 0xe3, 0x6f, 0x9a, 0x39, 0x59, 0xd1, 0xff,
0x75, 0x54, 0xef, 0x3a, 0x49, 0xef, 0x3a, 0x3d, 0xdd, 0x3b, 0x6c, 0x26, 0x16, 0x03, 0x69, 0x80,
0xbe, 0x86, 0x42, 0x10, 0x72, 0xd6, 0xcc, 0x4b, 0xc3, 0x55, 0xe2, 0x67, 0xea, 0x7b, 0x16, 0x0a,
0x2b, 0x86, 0x25, 0x09, 0xdd, 0x82, 0x22, 0xe3, 0x24, 0xe2, 0xcd, 0x42, 0xdb, 0xd8, 0xab, 0x63,
0x25, 0xa0, 0xff, 0x43, 0x75, 0x41, 0x2e, 0xc6, 0x2a, 0xd9, 0xa2, 0x8c, 0xb2, 0xb2, 0x20, 0x17,
0x32, 0x17, 0xfb, 0x57, 0xa8, 0x89, 0xc3, 0x81, 0xe3, 0x44, 0x94, 0x31, 0xf4, 0x10, 0xaa, 0x3c,
0x22, 0x3e, 0x0b, 0x83, 0x88, 0xcb, 0x8c, 0xcc, 0xb5, 0xfe, 0x09, 0xe2, 0x28, 0xd1, 0xe2, 0x15,
0x11, 0x35, 0xa1, 0x4c, 0x94, 0x03, 0x99, 0x60, 0x15, 0x27, 0xa2, 0xfd, 0x57, 0x0e, 0xcc, 0x6c,
0xa8, 0xe8, 0x7b, 0xa8, 0x89, 0x70, 0x3c, 0xc2, 0xa9, 0x3f, 0x5d, 0xea, 0x19, 0xfb, 0x48, 0x45,
0x60, 0x41, 0x2e, 0x06, 0x8a, 0x8c, 0xbe, 0x03, 0x73, 0xe1, 0xfa, 0xe3, 0x88, 0x86, 0x31, 0x97,
0x5a, 0x5d, 0x50, 0x2b, 0xdb, 0x3c, 0x1a, 0xe2, 0xc6, 0xc2, 0xf5, 0x71, 0x4a, 0x43, 0x5f, 0x28,
0x43, 0x16, 0x52, 0xea, 0x8c, 0xe7, 0x93, 0x50, 0x15, 0x34, 0x8f, 0xeb, 0x0b, 0xd7, 0x1f, 0x0a,
0xf0, 0xf9, 0x24, 0x64, 0xe2, 0x86, 0x90, 0x45, 0x10, 0xfb, 0xaa, 0x80, 0x79, 0xac, 0x25, 0xf4,
0x04, 0xea, 0x11, 0x65, 0x3c, 0x72, 0xa7, 0x32, 0x05, 0x59, 0x44, 0x11, 0x73, 0xf6, 0xa7, 0x2b,
0x02, 0xce, 0xd0, 0xd1, 0x2e, 0x98, 0xf4, 0x62, 0xea, 0xc5, 0x0e, 0x75, 0x74, 0x17, 0x4a, 0xed,
0xfc, 0x5e, 0x15, 0x37, 0x12, 0x54, 0xb5, 0x62, 0x09, 0x65, 0x1d, 0x3d, 0x6a, 0x43, 0x2d, 0x0e,
0xb9, 0xbb, 0xa0, 0x58, 0x84, 0x2f, 0x6b, 0x64, 0xe0, 0x75, 0x08, 0xdd, 0x87, 0x9b, 0x24, 0x76,
0x5c, 0x3e, 0x8c, 0xa7, 0x53, 0xca, 0x98, 0xe2, 0xe5, 0x24, 0xef, 0xaa, 0x02, 0xdd, 0x05, 0x90,
0xe0, 0x91, 0x4c, 0x4e, 0xa5, 0xbe, 0x86, 0xd8, 0x3f, 0x83, 0x75, 0x39, 0x07, 0x11, 0xf5, 0xeb,
0x88, 0xd2, 0xf1, 0x84, 0xf8, 0xce, 0x3b, 0xd7, 0xe1, 0x6f, 0xf4, 0x84, 0x37, 0x04, 0x7a, 0x98,
0x80, 0x62, 0xba, 0x24, 0xcd, 0x71, 0xd9, 0x5c, 0x06, 0x90, 0xc7, 0x15, 0x01, 0xf4, 0x5c, 0x36,
0xb7, 0xff, 0xcc, 0x41, 0x41, 0x38, 0x46, 0x26, 0xe4, 0x5c, 0x47, 0xef, 0x9d, 0x9c, 0xeb, 0xa0,
0x4e, 0x76, 0x62, 0x6a, 0xdd, 0x5b, 0x99, 0x62, 0xea, 0x71, 0x4c, 0xe7, 0x08, 0xed, 0x42, 0x81,
0x2f, 0x43, 0x2a, 0x43, 0x37, 0xbb, 0x37, 0xb3, 0x23, 0xb9, 0x0c, 0x29, 0x96, 0xea, 0x2b, 0x8d,
0x2a, 0xfc, 0xb7, 0x46, 0x7d, 0x0b, 0x95, 0x05, 0xe5, 0xc4, 0x21, 0x9c, 0xe8, 0x1e, 0xdf, 0xce,
0x98, 0xbe, 0xd0, 0x4a, 0x9c, 0xd2, 0xd0, 0x37, 0x00, 0x6b, 0xd3, 0x58, 0xda, 0x30, 0x8d, 0x6b,
0x1c, 0xfb, 0x07, 0xa8, 0xaf, 0xfb, 0x12, 0x97, 0x96, 0x2e, 0x88, 0xeb, 0xe9, 0xea, 0x28, 0x41,
0x8c, 0xe2, 0x3b, 0xe2, 0x79, 0x94, 0xeb, 0x1b, 0xa5, 0x25, 0xfb, 0x0f, 0x03, 0xea, 0x2f, 0x63,
0x1a, 0x2d, 0x93, 0x2d, 0xb4, 0x0b, 0x25, 0x46, 0x7d, 0x87, 0x46, 0xd7, 0x6f, 0x6b, 0xad, 0x14,
0x34, 0x4e, 0xa2, 0x99, 0xf6, 0x77, 0x95, 0xa6, 0x94, 0x22, 0x18, 0xcf, 0x5d, 0xb8, 0xc9, 0x8c,
0x28, 0x01, 0xb5, 0xa0, 0x12, 0xba, 0xfe, 0x6c, 0x42, 0xa6, 0x73, 0x59, 0xd2, 0x0a, 0x4e, 0x65,
0x9b, 0x40, 0x43, 0xc7, 0xa3, 0xf7, 0xea, 0xbf, 0x0c, 0xe8, 0x2b, 0xa8, 0xa4, 0x5b, 0x3d, 0x77,
0xdd, 0x06, 0x4e, 0xd5, 0x76, 0x03, 0x6a, 0xe7, 0xae, 0x3f, 0x4b, 0x5e, 0x0a, 0x13, 0xea, 0x4a,
0xd4, 0xea, 0xbf, 0x0d, 0xa8, 0xad, 0x35, 0x15, 0x3d, 0x86, 0x4a, 0x10, 0xd2, 0x88, 0xf0, 0x20,
0xd2, 0x2b, 0xec, 0xd3, 0xd4, 0xf3, 0x1a, 0xaf, 0x73, 0xa6, 0x49, 0x38, 0xa5, 0xa3, 0x47, 0x50,
0x96, 0x67, 0xdf, 0x91, 0x65, 0x32, 0xbb, 0x9f, 0x6c, 0xb6, 0xf4, 0x1d, 0x9c, 0x90, 0x45, 0xd9,
0xde, 0x12, 0x2f, 0xa6, 0x49, 0xd9, 0xa4, 0x60, 0x3f, 0x84, 0x4a, 0xf2, 0x0f, 0x54, 0x82, 0xdc,
0x60, 0x64, 0x6d, 0x89, 0x6f, 0xff, 0xa5, 0x65, 0x88, 0xef, 0xf1, 0xc8, 0xca, 0xa1, 0x32, 0xe4,
0x07, 0xa3, 0xbe, 0x95, 0x17, 0x87, 0xe3, 0x51, 0xdf, 0x2a, 0xd8, 0xf7, 0xa1, 0xac, 0xfd, 0x23,
0x04, 0xe6, 0x33, 0xdc, 0xef, 0x8f, 0x0f, 0x0f, 0x4e, 0x7b, 0xaf, 0x4e, 0x7a, 0xa3, 0x9f, 0xac,
0x2d, 0xd4, 0x80, 0xaa, 0xc4, 0x7a, 0x27, 0xc3, 0xe7, 0x96, 0x71, 0x6f, 0x07, 0x1a, 0x99, 0xb5,
0x8c, 0x2c, 0xa8, 0x8f, 0x8e, 0xce, 0xc7, 0xa3, 0xc1, 0x70, 0x7c, 0x8c, 0xcf, 0x8f, 0xac, 0xad,
0x7b, 0x36, 0x54, 0x92, 0x6b, 0x82, 0xaa, 0x50, 0x3c, 0xe8, 0xbd, 0x38, 0x39, 0xb5, 0xb6, 0x50,
0x0d, 0xca, 0xc3, 0xd1, 0x19, 0x3e, 0x38, 0xee, 0x5b, 0x46, 0xf7, 0x83, 0x01, 0x65, 0xbd, 0xa7,
0xd1, 0x63, 0x28, 0xa9, 0x67, 0x15, 0x6d, 0x78, 0xba, 0x5b, 0x9b, 0xde, 0x5f, 0xf4, 0x14, 0xe0,
0x30, 0xf6, 0xe6, 0xda, 0x7c, 0xfb, 0x7a, 0x73, 0xd6, 0x6a, 0x6e, 0xb0, 0x67, 0xe8, 0x15, 0x58,
0x97, 0x9f, 0x5b, 0xd4, 0x4e, 0xd9, 0x1b, 0x5e, 0xe2, 0xd6, 0xce, 0x47, 0x18, 0xca, 0x73, 0x97,
0x43, 0x51, 0x79, 0x7b, 0x04, 0x45, 0x39, 0xaf, 0x68, 0x75, 0xb5, 0xd7, 0xef, 0x53, 0xeb, 0xce,
0x65, 0x58, 0xa7, 0xb6, 0x0f, 0x05, 0x31, 0x75, 0x68, 0xb5, 0xa8, 0xd6, 0x66, 0xb2, 0x75, 0xfb,
0x12, 0xaa, 0x8c, 0x0e, 0x0b, 0xbf, 0xe4, 0xc2, 0xc9, 0xa4, 0x24, 0x1f, 0xb5, 0xfd, 0x7f, 0x02,
0x00, 0x00, 0xff, 0xff, 0x5e, 0xda, 0x93, 0x25, 0xca, 0x09, 0x00, 0x00,
var fileDescriptor_overlay_366705336bc11433 = []byte{
// 846 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x8e, 0xdb, 0x44,
0x14, 0x5e, 0xe7, 0xbf, 0x27, 0xb1, 0x37, 0x1a, 0xb5, 0xbb, 0x21, 0x40, 0x37, 0x58, 0x15, 0xac,
0x04, 0x4a, 0x21, 0xad, 0x2a, 0x5a, 0x81, 0x80, 0x28, 0x69, 0x59, 0x35, 0xea, 0xd2, 0x49, 0xa4,
0x4a, 0x70, 0x11, 0x39, 0xf1, 0x60, 0xcc, 0xda, 0x33, 0xc6, 0x33, 0xae, 0xb2, 0x7d, 0x02, 0xde,
0x86, 0xd7, 0xe0, 0x19, 0xb8, 0xd8, 0x47, 0xe0, 0x01, 0xb8, 0x42, 0xf3, 0x63, 0xaf, 0xb3, 0xbb,
0x59, 0xf5, 0xca, 0x3e, 0xe7, 0x7c, 0xdf, 0x99, 0xf9, 0xbe, 0x39, 0x33, 0x60, 0xb3, 0xb7, 0x24,
0x8d, 0xbc, 0xf3, 0x61, 0x92, 0x32, 0xc1, 0x50, 0xd3, 0x84, 0xfd, 0xfb, 0x01, 0x63, 0x41, 0x44,
0x1e, 0xaa, 0xf4, 0x2a, 0xfb, 0xf5, 0xa1, 0x9f, 0xa5, 0x9e, 0x08, 0x19, 0xd5, 0xc0, 0x3e, 0x04,
0x2c, 0x60, 0xf9, 0x3f, 0x65, 0x3e, 0xd1, 0xff, 0xee, 0xd7, 0x60, 0xcf, 0x18, 0x3b, 0xcb, 0x12,
0x4c, 0xfe, 0xc8, 0x08, 0x17, 0xe8, 0x33, 0x68, 0xca, 0xf2, 0x32, 0xf4, 0x7b, 0xd6, 0xc0, 0x3a,
0xee, 0x8c, 0x9d, 0xbf, 0x2f, 0x8e, 0xf6, 0xfe, 0xb9, 0x38, 0x6a, 0xbc, 0x62, 0x3e, 0x39, 0x99,
0xe0, 0x86, 0x2c, 0x9f, 0xf8, 0xee, 0x97, 0xe0, 0xe4, 0x4c, 0x9e, 0x30, 0xca, 0x09, 0xba, 0x0f,
0x35, 0x59, 0x53, 0xbc, 0xf6, 0x08, 0x86, 0x6a, 0x19, 0xc9, 0xc2, 0x2a, 0xef, 0x9e, 0x5e, 0x32,
0xd4, 0x5a, 0x1c, 0x7d, 0x0b, 0x4e, 0xa4, 0x32, 0xcb, 0x54, 0xa7, 0x7a, 0xd6, 0xa0, 0x7a, 0xdc,
0x1e, 0x1d, 0x0c, 0x73, 0x99, 0x5b, 0x04, 0x6c, 0x47, 0xe5, 0xd0, 0x9d, 0xc3, 0xfe, 0xf6, 0x16,
0x38, 0xfa, 0x1e, 0xf6, 0x8b, 0x8e, 0x3a, 0x67, 0x5a, 0x1e, 0x5e, 0x6b, 0xa9, 0xcb, 0xd8, 0x89,
0xb6, 0x62, 0xf7, 0x1b, 0xe8, 0x3d, 0x0f, 0xa9, 0x3f, 0x17, 0x2c, 0xf5, 0x02, 0x22, 0xb7, 0xcf,
0x0b, 0x85, 0x03, 0xa8, 0x4b, 0x25, 0xdc, 0xf4, 0x2c, 0x4b, 0xd4, 0x05, 0xf7, 0x5f, 0x0b, 0x0e,
0xaf, 0xd3, 0xb5, 0xb5, 0x47, 0xd0, 0x66, 0xab, 0xdf, 0xc9, 0x5a, 0x2c, 0x79, 0xf8, 0x4e, 0xdb,
0x54, 0xc5, 0xa0, 0x53, 0xf3, 0xf0, 0x1d, 0x41, 0x63, 0xd8, 0x5f, 0x33, 0x2a, 0x52, 0x6f, 0x2d,
0x96, 0x11, 0xa1, 0x81, 0xf8, 0xad, 0x57, 0x51, 0x5e, 0x7e, 0x30, 0xd4, 0xc7, 0x3b, 0xcc, 0x8f,
0x77, 0x38, 0x31, 0xc7, 0x8b, 0x9d, 0x9c, 0x31, 0x53, 0x04, 0xf4, 0x39, 0xd4, 0x58, 0x22, 0x78,
0xaf, 0xaa, 0x88, 0x97, 0xaa, 0x4f, 0xf5, 0xf7, 0x34, 0x91, 0x2c, 0x8e, 0x15, 0x08, 0x3d, 0x80,
0x3a, 0x17, 0x5e, 0x2a, 0x7a, 0xb5, 0x1b, 0x8f, 0x5a, 0x17, 0xd1, 0x87, 0x70, 0x27, 0xf6, 0x36,
0x4b, 0xad, 0xbc, 0xae, 0x76, 0xdd, 0x8a, 0xbd, 0x8d, 0xd2, 0xe6, 0xfe, 0x55, 0x01, 0x67, 0xbb,
0x37, 0x7a, 0x06, 0x6d, 0x89, 0x8f, 0x3c, 0x41, 0xe8, 0xfa, 0xdc, 0x8c, 0xc3, 0x2d, 0x12, 0x20,
0xf6, 0x36, 0x33, 0x0d, 0x46, 0x8f, 0xc1, 0x89, 0x43, 0xba, 0x4c, 0x49, 0x92, 0x09, 0x55, 0x35,
0x0e, 0xd8, 0x25, 0xab, 0x49, 0x82, 0xed, 0x38, 0xa4, 0xb8, 0xc0, 0xa0, 0x07, 0x9a, 0xc5, 0x13,
0x42, 0xfc, 0xe5, 0xd9, 0x2a, 0xd1, 0xf2, 0xab, 0xb8, 0x13, 0x87, 0x74, 0x2e, 0x93, 0x2f, 0x57,
0x09, 0x47, 0x07, 0xd0, 0xf0, 0x62, 0x96, 0x51, 0x2d, 0xb7, 0x8a, 0x4d, 0x84, 0x9e, 0x41, 0x27,
0x25, 0x5c, 0xa4, 0xe1, 0x5a, 0xed, 0x5f, 0x49, 0x94, 0x33, 0x58, 0x5a, 0xf1, 0xb2, 0x8a, 0xb7,
0xb0, 0xe8, 0x2b, 0x70, 0xc8, 0x66, 0x1d, 0x65, 0x3e, 0xf1, 0x8d, 0x41, 0x8d, 0x41, 0xf5, 0xb8,
0x33, 0x86, 0x92, 0x8d, 0x76, 0x8e, 0xd0, 0x8e, 0xfd, 0x69, 0x41, 0xe7, 0x75, 0x46, 0xd2, 0xf3,
0x7c, 0x2e, 0x5c, 0x68, 0x70, 0x42, 0x7d, 0x92, 0xde, 0x70, 0x73, 0x4c, 0x45, 0x62, 0x84, 0x97,
0x06, 0x44, 0x18, 0x3f, 0xb6, 0x30, 0xba, 0x82, 0xee, 0x42, 0x3d, 0x0a, 0xe3, 0x50, 0x18, 0xf1,
0x3a, 0x40, 0x7d, 0x68, 0x25, 0x21, 0x0d, 0x56, 0xde, 0xfa, 0x4c, 0xe9, 0x6e, 0xe1, 0x22, 0x76,
0x7f, 0x01, 0xdb, 0xec, 0xc4, 0x0c, 0xf8, 0xfb, 0x6c, 0xe5, 0x53, 0x68, 0x15, 0x77, 0xab, 0x72,
0xed, 0x1e, 0x14, 0x35, 0xd7, 0x86, 0xf6, 0x4f, 0x21, 0x0d, 0xf2, 0xcb, 0xea, 0x40, 0x47, 0x87,
0xa6, 0xfc, 0x9f, 0x05, 0xed, 0x92, 0xb1, 0xe8, 0x29, 0xb4, 0x58, 0x42, 0x52, 0x4f, 0x30, 0xbd,
0xb8, 0x33, 0xfa, 0xb8, 0x18, 0xde, 0x12, 0x6e, 0x78, 0x6a, 0x40, 0xb8, 0x80, 0xa3, 0x27, 0xd0,
0x54, 0xff, 0xd4, 0x57, 0xee, 0x38, 0xa3, 0x8f, 0x76, 0x33, 0xa9, 0x8f, 0x73, 0xb0, 0x34, 0xec,
0xad, 0x17, 0x65, 0x24, 0x37, 0x4c, 0x05, 0xee, 0x63, 0x68, 0xe5, 0x6b, 0xa0, 0x06, 0x54, 0x66,
0x8b, 0xee, 0x9e, 0xfc, 0x4e, 0x5f, 0x77, 0x2d, 0xf9, 0x7d, 0xb1, 0xe8, 0x56, 0x50, 0x13, 0xaa,
0xb3, 0xc5, 0xb4, 0x5b, 0x95, 0x3f, 0x2f, 0x16, 0xd3, 0x6e, 0xcd, 0xfd, 0x02, 0x9a, 0xa6, 0x3f,
0x42, 0xe0, 0x3c, 0xc7, 0xd3, 0xe9, 0x72, 0xfc, 0xc3, 0xab, 0xc9, 0x9b, 0x93, 0xc9, 0xe2, 0xc7,
0xee, 0x1e, 0xb2, 0xe1, 0x8e, 0xca, 0x4d, 0x4e, 0xe6, 0x2f, 0xbb, 0xd6, 0xe8, 0xc2, 0x82, 0xa6,
0xb9, 0x35, 0xe8, 0x29, 0x34, 0xf4, 0x93, 0x84, 0x76, 0x3c, 0x7b, 0xfd, 0x5d, 0x6f, 0x17, 0xfa,
0x0e, 0x60, 0x9c, 0x45, 0x67, 0x86, 0x7e, 0x78, 0x33, 0x9d, 0xf7, 0x7b, 0x3b, 0xf8, 0x1c, 0xbd,
0x81, 0xee, 0xd5, 0xd7, 0x0a, 0x0d, 0x0a, 0xf4, 0x8e, 0x87, 0xac, 0xff, 0xc9, 0x2d, 0x08, 0xdd,
0x79, 0x24, 0xa0, 0xae, 0xbb, 0x3d, 0x81, 0xba, 0x1a, 0x31, 0x74, 0xaf, 0x20, 0x95, 0x87, 0xbf,
0x7f, 0x70, 0x35, 0x6d, 0xa4, 0x3d, 0x82, 0x9a, 0x1c, 0x17, 0x74, 0xb7, 0xa8, 0x97, 0x86, 0xa9,
0x7f, 0xef, 0x4a, 0x56, 0x93, 0xc6, 0xb5, 0x9f, 0x2b, 0xc9, 0x6a, 0xd5, 0x50, 0x4f, 0xcc, 0xa3,
0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x26, 0xc0, 0x69, 0xb7, 0x2c, 0x07, 0x00, 0x00,
}

View File

@ -5,14 +5,11 @@ syntax = "proto3";
option go_package = "pb";
import "google/protobuf/duration.proto";
import "gogo.proto";
import "node.proto";
package overlay;
// NodeTransport is an enum of possible transports for the overlay network
enum NodeTransport {
TCP_TLS_GRPC = 0;
}
// Overlay defines the interface for communication with the overlay network
service Overlay {
// Lookup finds a nodes address from the network
@ -30,12 +27,12 @@ service Nodes {
// LookupRequest is is request message for the lookup rpc call
message LookupRequest {
string nodeID = 1;
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
// LookupResponse is is response message for the lookup rpc call
message LookupResponse {
Node node = 1;
node.Node node = 1;
}
//LookupRequests is a list of LookupRequest
@ -51,7 +48,7 @@ message LookupResponses {
// FindStorageNodesResponse is is response message for the FindStorageNodes rpc call
message FindStorageNodesResponse {
repeated Node nodes = 1;
repeated node.Node nodes = 1;
}
// FindStorageNodesRequest is is request message for the FindStorageNodes rpc call
@ -59,71 +56,30 @@ message FindStorageNodesRequest {
int64 object_size = 1;
google.protobuf.Duration contract_length = 2;
OverlayOptions opts = 3;
bytes start = 4;
bytes start = 4 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
int64 max_nodes = 5;
}
// NodeAddress contains the information needed to communicate with a node on the network
message NodeAddress {
NodeTransport transport = 1;
string address = 2;
}
// OverlayOptions is a set of criteria that a node must meet to be considered for a storage opportunity
message OverlayOptions {
google.protobuf.Duration max_latency = 1;
NodeRep min_reputation = 2;
node.NodeRep min_reputation = 2;
int64 min_speed_kbps = 3;
int64 amount = 4;
NodeRestrictions restrictions = 5;
repeated string excluded_nodes = 6;
}
// NodeRep is the reputation characteristics of a node
message NodeRep {
double uptimeRatio = 1;
double auditSuccessRatio = 2;
int64 auditCount = 3;
}
// NodeRestrictions contains all relevant data about a nodes ability to store data
message NodeRestrictions {
int64 free_bandwidth = 1;
int64 free_disk = 2;
}
// Node represents a node in the overlay network
message Node {
string id = 1;
NodeAddress address = 2;
NodeType type = 3;
NodeRestrictions restrictions = 4;
NodeMetadata metadata = 5;
NodeRep reputation = 6;
}
// NodeType is an enum of possible node types
enum NodeType {
ADMIN = 0;
STORAGE = 1;
}
message NodeMetadata {
string email = 1;
string wallet = 2;
node.NodeRestrictions restrictions = 5;
repeated bytes excluded_nodes = 6 [(gogoproto.customtype) = "NodeID"];
}
message QueryRequest {
overlay.Node sender = 1;
overlay.Node target = 2;
node.Node sender = 1;
node.Node target = 2;
int64 limit = 3;
bool pingback = 4;
}
message QueryResponse {
overlay.Node sender = 1;
repeated overlay.Node response = 2;
node.Node sender = 1;
repeated node.Node response = 2;
}
message PingRequest {};

View File

@ -6,6 +6,7 @@ package pb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
@ -41,7 +42,7 @@ func (x PayerBandwidthAllocation_Action) String() string {
return proto.EnumName(PayerBandwidthAllocation_Action_name, int32(x))
}
func (PayerBandwidthAllocation_Action) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{0, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{0, 0}
}
type PayerBandwidthAllocation struct {
@ -56,7 +57,7 @@ func (m *PayerBandwidthAllocation) Reset() { *m = PayerBandwidthAllocati
func (m *PayerBandwidthAllocation) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocation) ProtoMessage() {}
func (*PayerBandwidthAllocation) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{0}
}
func (m *PayerBandwidthAllocation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocation.Unmarshal(m, b)
@ -91,8 +92,8 @@ func (m *PayerBandwidthAllocation) GetData() []byte {
}
type PayerBandwidthAllocation_Data struct {
SatelliteId []byte `protobuf:"bytes,1,opt,name=satellite_id,json=satelliteId,proto3" json:"satellite_id,omitempty"`
UplinkId []byte `protobuf:"bytes,2,opt,name=uplink_id,json=uplinkId,proto3" json:"uplink_id,omitempty"`
SatelliteId NodeID `protobuf:"bytes,1,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"`
UplinkId NodeID `protobuf:"bytes,2,opt,name=uplink_id,json=uplinkId,proto3,customtype=NodeID" json:"uplink_id"`
MaxSize int64 `protobuf:"varint,3,opt,name=max_size,json=maxSize,proto3" json:"max_size,omitempty"`
ExpirationUnixSec int64 `protobuf:"varint,4,opt,name=expiration_unix_sec,json=expirationUnixSec,proto3" json:"expiration_unix_sec,omitempty"`
SerialNumber string `protobuf:"bytes,5,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
@ -107,7 +108,7 @@ func (m *PayerBandwidthAllocation_Data) Reset() { *m = PayerBandwidthAll
func (m *PayerBandwidthAllocation_Data) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocation_Data) ProtoMessage() {}
func (*PayerBandwidthAllocation_Data) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{0, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{0, 0}
}
func (m *PayerBandwidthAllocation_Data) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocation_Data.Unmarshal(m, b)
@ -127,20 +128,6 @@ func (m *PayerBandwidthAllocation_Data) XXX_DiscardUnknown() {
var xxx_messageInfo_PayerBandwidthAllocation_Data proto.InternalMessageInfo
func (m *PayerBandwidthAllocation_Data) GetSatelliteId() []byte {
if m != nil {
return m.SatelliteId
}
return nil
}
func (m *PayerBandwidthAllocation_Data) GetUplinkId() []byte {
if m != nil {
return m.UplinkId
}
return nil
}
func (m *PayerBandwidthAllocation_Data) GetMaxSize() int64 {
if m != nil {
return m.MaxSize
@ -188,7 +175,7 @@ func (m *RenterBandwidthAllocation) Reset() { *m = RenterBandwidthAlloca
func (m *RenterBandwidthAllocation) String() string { return proto.CompactTextString(m) }
func (*RenterBandwidthAllocation) ProtoMessage() {}
func (*RenterBandwidthAllocation) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{1}
return fileDescriptor_piecestore_c22028cf5808832f, []int{1}
}
func (m *RenterBandwidthAllocation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenterBandwidthAllocation.Unmarshal(m, b)
@ -225,7 +212,7 @@ func (m *RenterBandwidthAllocation) GetData() []byte {
type RenterBandwidthAllocation_Data struct {
PayerAllocation *PayerBandwidthAllocation `protobuf:"bytes,1,opt,name=payer_allocation,json=payerAllocation" json:"payer_allocation,omitempty"`
Total int64 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"`
StorageNodeId []byte `protobuf:"bytes,3,opt,name=storage_node_id,json=storageNodeId,proto3" json:"storage_node_id,omitempty"`
StorageNodeId NodeID `protobuf:"bytes,3,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"`
PubKey []byte `protobuf:"bytes,4,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -236,7 +223,7 @@ func (m *RenterBandwidthAllocation_Data) Reset() { *m = RenterBandwidthA
func (m *RenterBandwidthAllocation_Data) String() string { return proto.CompactTextString(m) }
func (*RenterBandwidthAllocation_Data) ProtoMessage() {}
func (*RenterBandwidthAllocation_Data) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{1, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{1, 0}
}
func (m *RenterBandwidthAllocation_Data) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenterBandwidthAllocation_Data.Unmarshal(m, b)
@ -270,13 +257,6 @@ func (m *RenterBandwidthAllocation_Data) GetTotal() int64 {
return 0
}
func (m *RenterBandwidthAllocation_Data) GetStorageNodeId() []byte {
if m != nil {
return m.StorageNodeId
}
return nil
}
func (m *RenterBandwidthAllocation_Data) GetPubKey() []byte {
if m != nil {
return m.PubKey
@ -297,7 +277,7 @@ func (m *PieceStore) Reset() { *m = PieceStore{} }
func (m *PieceStore) String() string { return proto.CompactTextString(m) }
func (*PieceStore) ProtoMessage() {}
func (*PieceStore) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{2}
return fileDescriptor_piecestore_c22028cf5808832f, []int{2}
}
func (m *PieceStore) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore.Unmarshal(m, b)
@ -339,6 +319,7 @@ func (m *PieceStore) GetAuthorization() *SignedMessage {
}
type PieceStore_PieceData struct {
// TODO: may want to use customtype and fixed-length byte slice
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
ExpirationUnixSec int64 `protobuf:"varint,2,opt,name=expiration_unix_sec,json=expirationUnixSec,proto3" json:"expiration_unix_sec,omitempty"`
Content []byte `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"`
@ -351,7 +332,7 @@ func (m *PieceStore_PieceData) Reset() { *m = PieceStore_PieceData{} }
func (m *PieceStore_PieceData) String() string { return proto.CompactTextString(m) }
func (*PieceStore_PieceData) ProtoMessage() {}
func (*PieceStore_PieceData) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{2, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{2, 0}
}
func (m *PieceStore_PieceData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore_PieceData.Unmarshal(m, b)
@ -393,6 +374,7 @@ func (m *PieceStore_PieceData) GetContent() []byte {
}
type PieceId struct {
// TODO: may want to use customtype and fixed-length byte slice
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Authorization *SignedMessage `protobuf:"bytes,2,opt,name=authorization" json:"authorization,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -404,7 +386,7 @@ func (m *PieceId) Reset() { *m = PieceId{} }
func (m *PieceId) String() string { return proto.CompactTextString(m) }
func (*PieceId) ProtoMessage() {}
func (*PieceId) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{3}
return fileDescriptor_piecestore_c22028cf5808832f, []int{3}
}
func (m *PieceId) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceId.Unmarshal(m, b)
@ -451,7 +433,7 @@ func (m *PieceSummary) Reset() { *m = PieceSummary{} }
func (m *PieceSummary) String() string { return proto.CompactTextString(m) }
func (*PieceSummary) ProtoMessage() {}
func (*PieceSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{4}
return fileDescriptor_piecestore_c22028cf5808832f, []int{4}
}
func (m *PieceSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceSummary.Unmarshal(m, b)
@ -505,7 +487,7 @@ func (m *PieceRetrieval) Reset() { *m = PieceRetrieval{} }
func (m *PieceRetrieval) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval) ProtoMessage() {}
func (*PieceRetrieval) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{5}
return fileDescriptor_piecestore_c22028cf5808832f, []int{5}
}
func (m *PieceRetrieval) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval.Unmarshal(m, b)
@ -547,6 +529,7 @@ func (m *PieceRetrieval) GetAuthorization() *SignedMessage {
}
type PieceRetrieval_PieceData struct {
// TODO: may want to use customtype and fixed-length byte slice
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
PieceSize int64 `protobuf:"varint,2,opt,name=piece_size,json=pieceSize,proto3" json:"piece_size,omitempty"`
Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
@ -559,7 +542,7 @@ func (m *PieceRetrieval_PieceData) Reset() { *m = PieceRetrieval_PieceDa
func (m *PieceRetrieval_PieceData) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval_PieceData) ProtoMessage() {}
func (*PieceRetrieval_PieceData) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{5, 0}
return fileDescriptor_piecestore_c22028cf5808832f, []int{5, 0}
}
func (m *PieceRetrieval_PieceData) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval_PieceData.Unmarshal(m, b)
@ -612,7 +595,7 @@ func (m *PieceRetrievalStream) Reset() { *m = PieceRetrievalStream{} }
func (m *PieceRetrievalStream) String() string { return proto.CompactTextString(m) }
func (*PieceRetrievalStream) ProtoMessage() {}
func (*PieceRetrievalStream) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{6}
return fileDescriptor_piecestore_c22028cf5808832f, []int{6}
}
func (m *PieceRetrievalStream) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrievalStream.Unmarshal(m, b)
@ -647,6 +630,7 @@ func (m *PieceRetrievalStream) GetContent() []byte {
}
type PieceDelete struct {
// TODO: may want to use customtype and fixed-length byte slice
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Authorization *SignedMessage `protobuf:"bytes,3,opt,name=authorization" json:"authorization,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -658,7 +642,7 @@ func (m *PieceDelete) Reset() { *m = PieceDelete{} }
func (m *PieceDelete) String() string { return proto.CompactTextString(m) }
func (*PieceDelete) ProtoMessage() {}
func (*PieceDelete) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{7}
return fileDescriptor_piecestore_c22028cf5808832f, []int{7}
}
func (m *PieceDelete) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDelete.Unmarshal(m, b)
@ -703,7 +687,7 @@ func (m *PieceDeleteSummary) Reset() { *m = PieceDeleteSummary{} }
func (m *PieceDeleteSummary) String() string { return proto.CompactTextString(m) }
func (*PieceDeleteSummary) ProtoMessage() {}
func (*PieceDeleteSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{8}
return fileDescriptor_piecestore_c22028cf5808832f, []int{8}
}
func (m *PieceDeleteSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDeleteSummary.Unmarshal(m, b)
@ -742,7 +726,7 @@ func (m *PieceStoreSummary) Reset() { *m = PieceStoreSummary{} }
func (m *PieceStoreSummary) String() string { return proto.CompactTextString(m) }
func (*PieceStoreSummary) ProtoMessage() {}
func (*PieceStoreSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{9}
return fileDescriptor_piecestore_c22028cf5808832f, []int{9}
}
func (m *PieceStoreSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStoreSummary.Unmarshal(m, b)
@ -786,7 +770,7 @@ func (m *StatsReq) Reset() { *m = StatsReq{} }
func (m *StatsReq) String() string { return proto.CompactTextString(m) }
func (*StatsReq) ProtoMessage() {}
func (*StatsReq) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{10}
return fileDescriptor_piecestore_c22028cf5808832f, []int{10}
}
func (m *StatsReq) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatsReq.Unmarshal(m, b)
@ -820,7 +804,7 @@ func (m *StatSummary) Reset() { *m = StatSummary{} }
func (m *StatSummary) String() string { return proto.CompactTextString(m) }
func (*StatSummary) ProtoMessage() {}
func (*StatSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{11}
return fileDescriptor_piecestore_c22028cf5808832f, []int{11}
}
func (m *StatSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatSummary.Unmarshal(m, b)
@ -881,7 +865,7 @@ func (m *SignedMessage) Reset() { *m = SignedMessage{} }
func (m *SignedMessage) String() string { return proto.CompactTextString(m) }
func (*SignedMessage) ProtoMessage() {}
func (*SignedMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_piecestore_158eb3902a5ba56c, []int{12}
return fileDescriptor_piecestore_c22028cf5808832f, []int{12}
}
func (m *SignedMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SignedMessage.Unmarshal(m, b)
@ -1214,66 +1198,68 @@ var _PieceStoreRoutes_serviceDesc = grpc.ServiceDesc{
Metadata: "piecestore.proto",
}
func init() { proto.RegisterFile("piecestore.proto", fileDescriptor_piecestore_158eb3902a5ba56c) }
func init() { proto.RegisterFile("piecestore.proto", fileDescriptor_piecestore_c22028cf5808832f) }
var fileDescriptor_piecestore_158eb3902a5ba56c = []byte{
// 916 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0xdb, 0x36,
0x14, 0x8e, 0xe4, 0xc4, 0x8e, 0x8f, 0x7f, 0xe2, 0x32, 0xc1, 0xa6, 0x68, 0xcd, 0xe6, 0xa9, 0x6b,
0x66, 0x74, 0x80, 0xb7, 0x65, 0x4f, 0xd0, 0x22, 0xc1, 0x60, 0x14, 0x4b, 0x03, 0x39, 0xb9, 0xe9,
0xc5, 0x54, 0x4a, 0x3a, 0x4d, 0x89, 0xca, 0x92, 0x26, 0x51, 0x99, 0x9d, 0xeb, 0x3d, 0xc3, 0x6e,
0xf6, 0x08, 0xc3, 0xee, 0xf6, 0x2c, 0xbb, 0xdf, 0x9b, 0x0c, 0x22, 0x69, 0x29, 0x8e, 0x2d, 0x7b,
0x28, 0x9a, 0x3b, 0xf1, 0x23, 0xf9, 0x9d, 0x73, 0x3e, 0x7e, 0x87, 0x14, 0xf4, 0x62, 0x86, 0x1e,
0xa6, 0x3c, 0x4a, 0x70, 0x18, 0x27, 0x11, 0x8f, 0xc8, 0x1d, 0x24, 0x89, 0x32, 0x8e, 0xa9, 0xf5,
0x7b, 0x0d, 0x8c, 0x0b, 0x3a, 0xc3, 0xe4, 0x05, 0x0d, 0xfd, 0x5f, 0x99, 0xcf, 0xdf, 0x3d, 0x0f,
0x82, 0xc8, 0xa3, 0x9c, 0x45, 0x21, 0x79, 0x0c, 0xcd, 0x94, 0x5d, 0x87, 0x94, 0x67, 0x09, 0x1a,
0x5a, 0x5f, 0x1b, 0xb4, 0xed, 0x12, 0x20, 0x04, 0xb6, 0x7d, 0xca, 0xa9, 0xa1, 0x8b, 0x09, 0xf1,
0x6d, 0xfe, 0xa9, 0xc3, 0xf6, 0x29, 0xe5, 0x94, 0x7c, 0x09, 0xed, 0x94, 0x72, 0x0c, 0x02, 0xc6,
0xd1, 0x61, 0xbe, 0xda, 0xdd, 0x2a, 0xb0, 0x91, 0x4f, 0x3e, 0x83, 0x66, 0x16, 0x07, 0x2c, 0x7c,
0x9f, 0xcf, 0x4b, 0x92, 0x5d, 0x09, 0x8c, 0x7c, 0x72, 0x08, 0xbb, 0x13, 0x3a, 0x75, 0x52, 0x76,
0x8b, 0x46, 0xad, 0xaf, 0x0d, 0x6a, 0x76, 0x63, 0x42, 0xa7, 0x63, 0x76, 0x8b, 0x64, 0x08, 0xfb,
0x38, 0x8d, 0x59, 0x22, 0x72, 0x74, 0xb2, 0x90, 0x4d, 0x9d, 0x14, 0x3d, 0x63, 0x5b, 0xac, 0x7a,
0x54, 0x4e, 0x5d, 0x85, 0x6c, 0x3a, 0x46, 0x8f, 0x3c, 0x81, 0x4e, 0x8a, 0x09, 0xa3, 0x81, 0x13,
0x66, 0x13, 0x17, 0x13, 0x63, 0xa7, 0xaf, 0x0d, 0x9a, 0x76, 0x5b, 0x82, 0xe7, 0x02, 0x23, 0x23,
0xa8, 0x53, 0x2f, 0xdf, 0x65, 0xd4, 0xfb, 0xda, 0xa0, 0x7b, 0xf2, 0xfd, 0xf0, 0xbe, 0x54, 0xc3,
0x2a, 0x99, 0x86, 0xcf, 0xc5, 0x46, 0x5b, 0x11, 0x90, 0x01, 0xf4, 0xbc, 0x04, 0x29, 0x47, 0xbf,
0x4c, 0xae, 0x21, 0x92, 0xeb, 0x2a, 0x5c, 0x65, 0x66, 0x99, 0x50, 0x97, 0x7b, 0x49, 0x03, 0x6a,
0x17, 0x57, 0x97, 0xbd, 0xad, 0xfc, 0xe3, 0xc7, 0xb3, 0xcb, 0x9e, 0x66, 0xfd, 0xa6, 0xc3, 0xa1,
0x8d, 0x21, 0xff, 0x58, 0x27, 0xf3, 0xb7, 0xa6, 0x4e, 0xe6, 0x0a, 0x7a, 0x71, 0x5e, 0x89, 0x43,
0x0b, 0x3a, 0xc1, 0xd0, 0x3a, 0x79, 0xf6, 0xff, 0x6b, 0xb6, 0xf7, 0x04, 0xc7, 0x9d, 0x8c, 0x0e,
0x60, 0x87, 0x47, 0x9c, 0x06, 0x22, 0x68, 0xcd, 0x96, 0x03, 0x72, 0x0c, 0x7b, 0x39, 0x1d, 0xbd,
0x46, 0x27, 0x8c, 0x7c, 0xe1, 0x84, 0x9a, 0x48, 0xaa, 0xa3, 0xe0, 0xf3, 0xc8, 0xcf, 0xbd, 0xf0,
0x29, 0x34, 0xe2, 0xcc, 0x75, 0xde, 0xe3, 0x4c, 0x9c, 0x63, 0xdb, 0xae, 0xc7, 0x99, 0xfb, 0x12,
0x67, 0xd6, 0xbf, 0x3a, 0xc0, 0x45, 0x9e, 0xd5, 0x38, 0xcf, 0x8a, 0xfc, 0x0c, 0x07, 0xee, 0x3c,
0x9b, 0xe5, 0x02, 0xbe, 0x59, 0x2e, 0xa0, 0x52, 0x42, 0x7b, 0xdf, 0x5d, 0xa1, 0xeb, 0x19, 0x80,
0xa0, 0x70, 0x0a, 0xfd, 0x5a, 0x27, 0xc7, 0x2b, 0x64, 0x29, 0x32, 0x92, 0x9f, 0xb9, 0xb0, 0x76,
0x33, 0x9e, 0x7f, 0x92, 0x33, 0xe8, 0xd0, 0x8c, 0xbf, 0x8b, 0x12, 0x76, 0x2b, 0xf3, 0xab, 0x09,
0xa6, 0x2f, 0x96, 0x99, 0xc6, 0xec, 0x3a, 0x44, 0xff, 0x27, 0x4c, 0x53, 0x7a, 0x8d, 0xf6, 0xe2,
0x2e, 0x13, 0xa1, 0x59, 0xd0, 0x93, 0x2e, 0xe8, 0xaa, 0x8f, 0x9a, 0xb6, 0xce, 0xfc, 0xaa, 0x36,
0xd0, 0xab, 0xda, 0xc0, 0x80, 0x86, 0x17, 0x85, 0x1c, 0x43, 0xae, 0x8e, 0x60, 0x3e, 0xb4, 0xde,
0x40, 0x43, 0x84, 0x19, 0xf9, 0x4b, 0x41, 0x96, 0x0a, 0xd1, 0x3f, 0xa4, 0x10, 0x6b, 0x02, 0x6d,
0x29, 0x59, 0x36, 0x99, 0xd0, 0x64, 0xb6, 0x14, 0xe6, 0x68, 0x2e, 0xbb, 0xe8, 0x77, 0x59, 0x82,
0x94, 0x73, 0x5d, 0xc7, 0xd7, 0x2a, 0x4a, 0xb5, 0xfe, 0xd1, 0xa1, 0x2b, 0xe2, 0xd9, 0xc8, 0x13,
0x86, 0x37, 0x34, 0x78, 0x70, 0xe3, 0x8c, 0x56, 0x18, 0xe7, 0x59, 0x85, 0x71, 0x8a, 0xac, 0x1e,
0xd4, 0x3c, 0xf6, 0x3a, 0xf3, 0x6c, 0x10, 0xfc, 0x13, 0xa8, 0x47, 0x6f, 0xdf, 0xa6, 0xc8, 0x95,
0xc6, 0x6a, 0x64, 0xbd, 0x82, 0x83, 0xc5, 0x0a, 0xc6, 0x3c, 0x41, 0x3a, 0xb9, 0x47, 0xa7, 0xdd,
0xa7, 0xbb, 0x63, 0x3d, 0x7d, 0xd1, 0x7a, 0x3e, 0xb4, 0x64, 0x92, 0x18, 0x20, 0xc7, 0xcd, 0xf6,
0xfb, 0x20, 0x29, 0xac, 0x21, 0x90, 0x3b, 0x51, 0xe6, 0x26, 0x34, 0xa0, 0x31, 0x91, 0xeb, 0x55,
0xc4, 0xf9, 0xd0, 0xba, 0x84, 0x47, 0x65, 0x87, 0x6f, 0x5c, 0x4e, 0x9e, 0x42, 0x57, 0xdc, 0x76,
0x4e, 0x82, 0x1e, 0xb2, 0x1b, 0xf4, 0x95, 0xa0, 0x1d, 0x81, 0xda, 0x0a, 0xb4, 0x00, 0x76, 0xc7,
0x9c, 0xf2, 0xd4, 0xc6, 0x5f, 0xac, 0xbf, 0x34, 0x68, 0xe5, 0x83, 0x39, 0xf9, 0x11, 0x40, 0x96,
0xa2, 0xef, 0xa4, 0x31, 0xf5, 0x0a, 0x01, 0x73, 0x64, 0x9c, 0x03, 0xe4, 0x6b, 0xd8, 0xa3, 0x37,
0x94, 0x05, 0xd4, 0x0d, 0x50, 0xad, 0x91, 0x21, 0xba, 0x05, 0x2c, 0x17, 0x3e, 0x85, 0xae, 0xe0,
0x29, 0x2c, 0xaa, 0x0e, 0xb0, 0x93, 0xa3, 0x85, 0x99, 0xc9, 0xb7, 0xb0, 0x5f, 0xf2, 0x95, 0x6b,
0xe5, 0x13, 0x4a, 0x8a, 0xa9, 0x62, 0x83, 0xf5, 0x06, 0x3a, 0x0b, 0x0a, 0x17, 0x4f, 0x8c, 0x56,
0x3e, 0x31, 0x8b, 0x8f, 0x92, 0x7e, 0xff, 0x51, 0xca, 0x3d, 0x92, 0xb9, 0x01, 0xf3, 0xc4, 0x2d,
0x2f, 0xaf, 0xa0, 0xa6, 0x44, 0x5e, 0xe2, 0xec, 0xe4, 0x8f, 0x1a, 0xf4, 0x4a, 0xd1, 0x6d, 0x71,
0xaa, 0xe4, 0x14, 0x76, 0x04, 0x46, 0x0e, 0x2b, 0x5a, 0x69, 0xe4, 0x9b, 0x9f, 0x57, 0x5d, 0xcf,
0x52, 0x5a, 0x6b, 0x8b, 0xbc, 0x86, 0x5d, 0x65, 0x58, 0x24, 0xfd, 0x4d, 0x3d, 0x69, 0x1e, 0x6f,
0x5a, 0x21, 0x3d, 0x6f, 0x6d, 0x0d, 0xb4, 0xef, 0x34, 0x72, 0x0e, 0x3b, 0xf2, 0x65, 0x7a, 0xbc,
0xee, 0x95, 0x30, 0x9f, 0xac, 0x9b, 0x2d, 0x32, 0x1d, 0x68, 0xe4, 0x15, 0xd4, 0x55, 0x2f, 0x1c,
0x55, 0x6c, 0x91, 0xd3, 0xe6, 0x57, 0x6b, 0xa7, 0xcb, 0xe2, 0x4f, 0xf3, 0x04, 0x29, 0x4f, 0x89,
0xb9, 0xa2, 0x69, 0x94, 0x1d, 0xcd, 0xa3, 0xd5, 0x73, 0x05, 0xcb, 0x8b, 0xed, 0xd7, 0x7a, 0xec,
0xba, 0x75, 0xf1, 0x17, 0xf9, 0xc3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x68, 0x43, 0xc6, 0xc7,
0x59, 0x0a, 0x00, 0x00,
var fileDescriptor_piecestore_c22028cf5808832f = []byte{
// 946 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6e, 0xdb, 0xc6,
0x13, 0x36, 0x29, 0x5b, 0xb2, 0x46, 0x7f, 0xac, 0xac, 0x8d, 0xdf, 0x4f, 0x26, 0xe2, 0x5a, 0x60,
0x9a, 0x54, 0x48, 0x00, 0xb5, 0x71, 0x81, 0xde, 0x63, 0xd8, 0x28, 0x84, 0xa0, 0x8e, 0xb1, 0xb2,
0x2f, 0x39, 0x94, 0x59, 0x92, 0x13, 0x65, 0x11, 0x8a, 0x64, 0xc9, 0xa5, 0x2b, 0xf9, 0x39, 0x7a,
0xeb, 0x33, 0xf4, 0xda, 0x27, 0xe8, 0xa1, 0x40, 0xef, 0x3d, 0xf4, 0x10, 0xa0, 0x6f, 0x52, 0x70,
0x97, 0x22, 0x2d, 0x4b, 0x94, 0x8a, 0xa0, 0xb9, 0x71, 0x67, 0x66, 0xbf, 0x99, 0xf9, 0xf6, 0x9b,
0x5d, 0x42, 0x27, 0xe4, 0xe8, 0x60, 0x2c, 0x82, 0x08, 0x07, 0x61, 0x14, 0x88, 0x80, 0xdc, 0xb1,
0x44, 0x41, 0x22, 0x30, 0x36, 0x60, 0x1c, 0x8c, 0x03, 0xe5, 0x35, 0x7f, 0xad, 0x40, 0xf7, 0x92,
0xcd, 0x30, 0x3a, 0x65, 0xbe, 0xfb, 0x23, 0x77, 0xc5, 0xbb, 0x17, 0x9e, 0x17, 0x38, 0x4c, 0xf0,
0xc0, 0x27, 0x0f, 0xa1, 0x1e, 0xf3, 0xb1, 0xcf, 0x44, 0x12, 0x61, 0x57, 0xeb, 0x69, 0xfd, 0x26,
0x2d, 0x0c, 0x84, 0xc0, 0xb6, 0xcb, 0x04, 0xeb, 0xea, 0xd2, 0x21, 0xbf, 0x8d, 0x3f, 0x74, 0xd8,
0x3e, 0x63, 0x82, 0x91, 0xe7, 0xd0, 0x8c, 0x99, 0x40, 0xcf, 0xe3, 0x02, 0x2d, 0xee, 0xaa, 0xdd,
0xa7, 0xed, 0xdf, 0x3f, 0x1c, 0x6f, 0xfd, 0xf5, 0xe1, 0xb8, 0x7a, 0x11, 0xb8, 0x38, 0x3c, 0xa3,
0x8d, 0x3c, 0x66, 0xe8, 0x92, 0x67, 0x50, 0x4f, 0x42, 0x8f, 0xfb, 0xef, 0xd3, 0x78, 0x7d, 0x65,
0xfc, 0xae, 0x0a, 0x18, 0xba, 0xe4, 0x10, 0x76, 0x27, 0x6c, 0x6a, 0xc5, 0xfc, 0x16, 0xbb, 0x95,
0x9e, 0xd6, 0xaf, 0xd0, 0xda, 0x84, 0x4d, 0x47, 0xfc, 0x16, 0xc9, 0x00, 0xf6, 0x71, 0x1a, 0xf2,
0x48, 0xf6, 0x60, 0x25, 0x3e, 0x9f, 0x5a, 0x31, 0x3a, 0xdd, 0x6d, 0x19, 0xf5, 0xa0, 0x70, 0x5d,
0xfb, 0x7c, 0x3a, 0x42, 0x87, 0x3c, 0x82, 0x56, 0x8c, 0x11, 0x67, 0x9e, 0xe5, 0x27, 0x13, 0x1b,
0xa3, 0xee, 0x4e, 0x4f, 0xeb, 0xd7, 0x69, 0x53, 0x19, 0x2f, 0xa4, 0x8d, 0x0c, 0xa1, 0xca, 0x9c,
0x74, 0x57, 0xb7, 0xda, 0xd3, 0xfa, 0xed, 0x93, 0xe7, 0x83, 0xfb, 0xb4, 0x0e, 0xca, 0x68, 0x1c,
0xbc, 0x90, 0x1b, 0x69, 0x06, 0x40, 0xfa, 0xd0, 0x71, 0x22, 0x64, 0x02, 0xdd, 0xa2, 0xb8, 0x9a,
0x2c, 0xae, 0x9d, 0xd9, 0xb3, 0xca, 0x4c, 0x03, 0xaa, 0x6a, 0x2f, 0xa9, 0x41, 0xe5, 0xf2, 0xfa,
0xaa, 0xb3, 0x95, 0x7e, 0x7c, 0x7b, 0x7e, 0xd5, 0xd1, 0xcc, 0x9f, 0x74, 0x38, 0xa4, 0xe8, 0x8b,
0xff, 0xea, 0xe4, 0x7e, 0xd3, 0xb2, 0x93, 0xbb, 0x86, 0x4e, 0x98, 0x76, 0x62, 0xb1, 0x1c, 0x4e,
0x22, 0x34, 0x4e, 0x9e, 0xfe, 0xfb, 0x9e, 0xe9, 0x9e, 0xc4, 0xb8, 0x53, 0xd1, 0x01, 0xec, 0x88,
0x40, 0x30, 0x4f, 0x26, 0xad, 0x50, 0xb5, 0x20, 0xdf, 0xc0, 0x5e, 0x0a, 0xc7, 0xc6, 0x68, 0xf9,
0x81, 0x2b, 0x95, 0x52, 0x59, 0x79, 0xf2, 0xad, 0x2c, 0x4c, 0x2e, 0x5d, 0xf2, 0x7f, 0xa8, 0x85,
0x89, 0x6d, 0xbd, 0xc7, 0x99, 0x3c, 0xd7, 0x26, 0xad, 0x86, 0x89, 0xfd, 0x12, 0x67, 0xe6, 0xdf,
0x3a, 0xc0, 0x65, 0x5a, 0xe5, 0x28, 0xad, 0x92, 0x7c, 0x0f, 0x07, 0xf6, 0xbc, 0xba, 0xe5, 0x86,
0x9e, 0x2d, 0x37, 0x54, 0x4a, 0x29, 0xdd, 0xb7, 0x57, 0xf0, 0x7c, 0x0e, 0x20, 0x21, 0xac, 0x9c,
0xcf, 0xc6, 0xc9, 0x93, 0x15, 0x34, 0xe5, 0x15, 0xa9, 0xcf, 0x94, 0x68, 0x5a, 0x0f, 0xe7, 0x9f,
0xe4, 0x1c, 0x5a, 0x2c, 0x11, 0xef, 0x82, 0x88, 0xdf, 0xaa, 0xfa, 0x2a, 0x12, 0xe9, 0x78, 0x19,
0x69, 0xc4, 0xc7, 0x3e, 0xba, 0xdf, 0x61, 0x1c, 0xb3, 0x31, 0xd2, 0xc5, 0x5d, 0x06, 0x42, 0x3d,
0x87, 0x27, 0x6d, 0xd0, 0xb3, 0xb9, 0xab, 0x53, 0x9d, 0xbb, 0x65, 0x63, 0xa1, 0x97, 0x8d, 0x45,
0x17, 0x6a, 0x4e, 0xe0, 0x0b, 0xf4, 0x85, 0x3a, 0x12, 0x3a, 0x5f, 0x9a, 0x6f, 0xa0, 0x26, 0xd3,
0x0c, 0xdd, 0xa5, 0x24, 0x4b, 0x8d, 0xe8, 0x1f, 0xd3, 0x88, 0x39, 0x81, 0xa6, 0xa2, 0x2c, 0x99,
0x4c, 0x58, 0x34, 0x5b, 0x4a, 0x73, 0x34, 0xa7, 0x5d, 0xce, 0xbf, 0x6a, 0x41, 0xd1, 0xb9, 0xee,
0x06, 0xa8, 0x94, 0xb4, 0x6a, 0xfe, 0xa9, 0x43, 0x5b, 0xe6, 0xa3, 0x28, 0x22, 0x8e, 0x37, 0xcc,
0xfb, 0xe4, 0xc2, 0x19, 0xae, 0x10, 0xce, 0xd3, 0x12, 0xe1, 0xe4, 0x55, 0x7d, 0x52, 0xf1, 0xd0,
0x75, 0xe2, 0xd9, 0x40, 0xf8, 0xff, 0xa0, 0x1a, 0xbc, 0x7d, 0x1b, 0xa3, 0xc8, 0x38, 0xce, 0x56,
0xe6, 0x2b, 0x38, 0x58, 0xec, 0x60, 0x24, 0x22, 0x64, 0x93, 0x7b, 0x70, 0xda, 0x7d, 0xb8, 0x3b,
0xd2, 0xd3, 0x17, 0xa5, 0xe7, 0x42, 0x43, 0x15, 0x89, 0x1e, 0x0a, 0xdc, 0x2c, 0xbf, 0x8f, 0xa2,
0xc2, 0x1c, 0x00, 0xb9, 0x93, 0x65, 0x2e, 0xc2, 0x2e, 0xd4, 0x26, 0x2a, 0x3e, 0xcb, 0x38, 0x5f,
0x9a, 0x57, 0xf0, 0xa0, 0x98, 0xf0, 0x8d, 0xe1, 0xe4, 0x31, 0xb4, 0xe5, 0xed, 0x67, 0x45, 0xe8,
0x20, 0xbf, 0x41, 0x37, 0x23, 0xb4, 0x25, 0xad, 0x34, 0x33, 0x9a, 0x00, 0xbb, 0x23, 0xc1, 0x44,
0x4c, 0xf1, 0x07, 0xf3, 0x17, 0x0d, 0x1a, 0xe9, 0x62, 0x0e, 0x7e, 0x04, 0x90, 0xc4, 0xe8, 0x5a,
0x71, 0xc8, 0x9c, 0x9c, 0xc0, 0xd4, 0x32, 0x4a, 0x0d, 0xe4, 0x0b, 0xd8, 0x63, 0x37, 0x8c, 0x7b,
0xcc, 0xf6, 0x30, 0x8b, 0x51, 0x29, 0xda, 0xb9, 0x59, 0x05, 0x3e, 0x86, 0xb6, 0xc4, 0xc9, 0x25,
0x9a, 0x1d, 0x60, 0x2b, 0xb5, 0xe6, 0x62, 0x26, 0x5f, 0xc2, 0x7e, 0x81, 0x57, 0xc4, 0xaa, 0x27,
0x95, 0xe4, 0xae, 0x7c, 0x83, 0xf9, 0x06, 0x5a, 0x0b, 0x0c, 0xe7, 0x4f, 0x8e, 0x56, 0x3c, 0x39,
0x8b, 0x8f, 0x94, 0x7e, 0xff, 0x91, 0x4a, 0x35, 0x92, 0xd8, 0x1e, 0x77, 0xe4, 0x2d, 0xaf, 0xae,
0xa0, 0xba, 0xb2, 0xbc, 0xc4, 0xd9, 0xc9, 0xcf, 0x15, 0xe8, 0x14, 0xa4, 0x53, 0x79, 0xaa, 0xe4,
0x0c, 0x76, 0xa4, 0x8d, 0x1c, 0x96, 0x8c, 0xd2, 0xd0, 0x35, 0x3e, 0x2b, 0xbb, 0x9e, 0x15, 0xb5,
0xe6, 0x16, 0x79, 0x0d, 0xbb, 0x99, 0x60, 0x91, 0xf4, 0x36, 0xcd, 0xa4, 0xf1, 0x64, 0x53, 0x84,
0xd2, 0xbc, 0xb9, 0xd5, 0xd7, 0xbe, 0xd2, 0xc8, 0x05, 0xec, 0xa8, 0x97, 0xe9, 0xe1, 0xba, 0x57,
0xc2, 0x78, 0xb4, 0xce, 0x9b, 0x57, 0xda, 0xd7, 0xc8, 0x2b, 0xa8, 0x66, 0xb3, 0x70, 0x54, 0xb2,
0x45, 0xb9, 0x8d, 0xcf, 0xd7, 0xba, 0x8b, 0xe6, 0xcf, 0xd2, 0x02, 0x99, 0x88, 0x89, 0xb1, 0x62,
0x68, 0x32, 0x39, 0x1a, 0x47, 0xab, 0x7d, 0x39, 0xca, 0xe9, 0xf6, 0x6b, 0x3d, 0xb4, 0xed, 0xaa,
0xfc, 0xc7, 0xfc, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd1, 0x91, 0x47, 0x95, 0x0a,
0x00, 0x00,
}

View File

@ -6,6 +6,8 @@ option go_package = "pb";
package piecestoreroutes;
import "gogo.proto";
service PieceStoreRoutes {
rpc Piece(PieceId) returns (PieceSummary) {}
@ -26,8 +28,8 @@ message PayerBandwidthAllocation { // Payer refers to satellite
}
message Data {
bytes satellite_id = 1; // Satellite Identity
bytes uplink_id = 2; // Uplink Identity
bytes satellite_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; // Satellite Identity
bytes uplink_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; // Uplink Identity
int64 max_size = 3; // Max amount of data the satellite will pay for in bytes
int64 expiration_unix_sec = 4; // Unix timestamp for when data is no longer being paid for
string serial_number = 5; // Unique serial number
@ -43,7 +45,7 @@ message RenterBandwidthAllocation { // Renter refers to uplink
message Data {
PayerBandwidthAllocation payer_allocation = 1; // Bandwidth Allocation from Satellite
int64 total = 2; // Total Bytes Stored
bytes storage_node_id = 3; // Storage Node Identity
bytes storage_node_id = 3 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; // Storage Node Identity
bytes pub_key = 4; // Renter Public Key // TODO: Take this out. It will be kept in a database on the satellite
}
@ -53,6 +55,7 @@ message RenterBandwidthAllocation { // Renter refers to uplink
message PieceStore {
message PieceData {
// TODO: may want to use customtype and fixed-length byte slice
string id = 1;
int64 expiration_unix_sec = 2;
bytes content = 3;
@ -64,6 +67,7 @@ message PieceStore {
}
message PieceId {
// TODO: may want to use customtype and fixed-length byte slice
string id = 1;
SignedMessage authorization = 2;
@ -77,6 +81,7 @@ message PieceSummary {
message PieceRetrieval {
message PieceData {
// TODO: may want to use customtype and fixed-length byte slice
string id = 1;
int64 piece_size = 2;
int64 offset = 3;
@ -93,6 +98,7 @@ message PieceRetrievalStream {
}
message PieceDelete {
// TODO: may want to use customtype and fixed-length byte slice
string id = 1;
SignedMessage authorization = 3;
}

View File

@ -6,6 +6,7 @@ package pb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import context "golang.org/x/net/context"
@ -39,7 +40,7 @@ func (x RedundancyScheme_SchemeType) String() string {
return proto.EnumName(RedundancyScheme_SchemeType_name, int32(x))
}
func (RedundancyScheme_SchemeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{0, 0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{0, 0}
}
type Pointer_DataType int32
@ -62,7 +63,7 @@ func (x Pointer_DataType) String() string {
return proto.EnumName(Pointer_DataType_name, int32(x))
}
func (Pointer_DataType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{3, 0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{3, 0}
}
type RedundancyScheme struct {
@ -82,7 +83,7 @@ func (m *RedundancyScheme) Reset() { *m = RedundancyScheme{} }
func (m *RedundancyScheme) String() string { return proto.CompactTextString(m) }
func (*RedundancyScheme) ProtoMessage() {}
func (*RedundancyScheme) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{0}
}
func (m *RedundancyScheme) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RedundancyScheme.Unmarshal(m, b)
@ -146,7 +147,7 @@ func (m *RedundancyScheme) GetErasureShareSize() int32 {
type RemotePiece struct {
PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"`
NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -156,7 +157,7 @@ func (m *RemotePiece) Reset() { *m = RemotePiece{} }
func (m *RemotePiece) String() string { return proto.CompactTextString(m) }
func (*RemotePiece) ProtoMessage() {}
func (*RemotePiece) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{1}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{1}
}
func (m *RemotePiece) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RemotePiece.Unmarshal(m, b)
@ -183,28 +184,22 @@ func (m *RemotePiece) GetPieceNum() int32 {
return 0
}
func (m *RemotePiece) GetNodeId() string {
if m != nil {
return m.NodeId
}
return ""
}
type RemoteSegment struct {
Redundancy *RedundancyScheme `protobuf:"bytes,1,opt,name=redundancy" json:"redundancy,omitempty"`
PieceId string `protobuf:"bytes,2,opt,name=piece_id,json=pieceId,proto3" json:"piece_id,omitempty"`
RemotePieces []*RemotePiece `protobuf:"bytes,3,rep,name=remote_pieces,json=remotePieces" json:"remote_pieces,omitempty"`
MerkleRoot []byte `protobuf:"bytes,4,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Redundancy *RedundancyScheme `protobuf:"bytes,1,opt,name=redundancy" json:"redundancy,omitempty"`
// TODO: may want to use customtype and fixed-length byte slice
PieceId string `protobuf:"bytes,2,opt,name=piece_id,json=pieceId,proto3" json:"piece_id,omitempty"`
RemotePieces []*RemotePiece `protobuf:"bytes,3,rep,name=remote_pieces,json=remotePieces" json:"remote_pieces,omitempty"`
MerkleRoot []byte `protobuf:"bytes,4,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RemoteSegment) Reset() { *m = RemoteSegment{} }
func (m *RemoteSegment) String() string { return proto.CompactTextString(m) }
func (*RemoteSegment) ProtoMessage() {}
func (*RemoteSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{2}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{2}
}
func (m *RemoteSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RemoteSegment.Unmarshal(m, b)
@ -253,9 +248,10 @@ func (m *RemoteSegment) GetMerkleRoot() []byte {
}
type Pointer struct {
Type Pointer_DataType `protobuf:"varint,1,opt,name=type,proto3,enum=pointerdb.Pointer_DataType" json:"type,omitempty"`
InlineSegment []byte `protobuf:"bytes,3,opt,name=inline_segment,json=inlineSegment,proto3" json:"inline_segment,omitempty"`
Remote *RemoteSegment `protobuf:"bytes,4,opt,name=remote" json:"remote,omitempty"`
Type Pointer_DataType `protobuf:"varint,1,opt,name=type,proto3,enum=pointerdb.Pointer_DataType" json:"type,omitempty"`
InlineSegment []byte `protobuf:"bytes,3,opt,name=inline_segment,json=inlineSegment,proto3" json:"inline_segment,omitempty"`
Remote *RemoteSegment `protobuf:"bytes,4,opt,name=remote" json:"remote,omitempty"`
// TODO: rename
SegmentSize int64 `protobuf:"varint,5,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"`
CreationDate *timestamp.Timestamp `protobuf:"bytes,6,opt,name=creation_date,json=creationDate" json:"creation_date,omitempty"`
ExpirationDate *timestamp.Timestamp `protobuf:"bytes,7,opt,name=expiration_date,json=expirationDate" json:"expiration_date,omitempty"`
@ -269,7 +265,7 @@ func (m *Pointer) Reset() { *m = Pointer{} }
func (m *Pointer) String() string { return proto.CompactTextString(m) }
func (*Pointer) ProtoMessage() {}
func (*Pointer) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{3}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{3}
}
func (m *Pointer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Pointer.Unmarshal(m, b)
@ -351,7 +347,7 @@ func (m *PutRequest) Reset() { *m = PutRequest{} }
func (m *PutRequest) String() string { return proto.CompactTextString(m) }
func (*PutRequest) ProtoMessage() {}
func (*PutRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{4}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{4}
}
func (m *PutRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PutRequest.Unmarshal(m, b)
@ -397,7 +393,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{5}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{5}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetRequest.Unmarshal(m, b)
@ -441,7 +437,7 @@ func (m *ListRequest) Reset() { *m = ListRequest{} }
func (m *ListRequest) String() string { return proto.CompactTextString(m) }
func (*ListRequest) ProtoMessage() {}
func (*ListRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{6}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{6}
}
func (m *ListRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListRequest.Unmarshal(m, b)
@ -514,7 +510,7 @@ func (m *PutResponse) Reset() { *m = PutResponse{} }
func (m *PutResponse) String() string { return proto.CompactTextString(m) }
func (*PutResponse) ProtoMessage() {}
func (*PutResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{7}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{7}
}
func (m *PutResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PutResponse.Unmarshal(m, b)
@ -549,7 +545,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{8}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{8}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetResponse.Unmarshal(m, b)
@ -610,7 +606,7 @@ func (m *ListResponse) Reset() { *m = ListResponse{} }
func (m *ListResponse) String() string { return proto.CompactTextString(m) }
func (*ListResponse) ProtoMessage() {}
func (*ListResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{9}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{9}
}
func (m *ListResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListResponse.Unmarshal(m, b)
@ -657,7 +653,7 @@ func (m *ListResponse_Item) Reset() { *m = ListResponse_Item{} }
func (m *ListResponse_Item) String() string { return proto.CompactTextString(m) }
func (*ListResponse_Item) ProtoMessage() {}
func (*ListResponse_Item) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{9, 0}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{9, 0}
}
func (m *ListResponse_Item) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListResponse_Item.Unmarshal(m, b)
@ -709,7 +705,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{10}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{10}
}
func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
@ -747,7 +743,7 @@ func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteResponse) ProtoMessage() {}
func (*DeleteResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{11}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{11}
}
func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
@ -782,7 +778,7 @@ func (m *IterateRequest) Reset() { *m = IterateRequest{} }
func (m *IterateRequest) String() string { return proto.CompactTextString(m) }
func (*IterateRequest) ProtoMessage() {}
func (*IterateRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{12}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{12}
}
func (m *IterateRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IterateRequest.Unmarshal(m, b)
@ -841,7 +837,7 @@ func (m *PayerBandwidthAllocationRequest) Reset() { *m = PayerBandwidthA
func (m *PayerBandwidthAllocationRequest) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocationRequest) ProtoMessage() {}
func (*PayerBandwidthAllocationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{13}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{13}
}
func (m *PayerBandwidthAllocationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocationRequest.Unmarshal(m, b)
@ -879,7 +875,7 @@ func (m *PayerBandwidthAllocationResponse) Reset() { *m = PayerBandwidth
func (m *PayerBandwidthAllocationResponse) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocationResponse) ProtoMessage() {}
func (*PayerBandwidthAllocationResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pointerdb_526f1300cf6003a1, []int{14}
return fileDescriptor_pointerdb_21b4d7ef3abc5ac1, []int{14}
}
func (m *PayerBandwidthAllocationResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PayerBandwidthAllocationResponse.Unmarshal(m, b)
@ -1141,75 +1137,77 @@ var _PointerDB_serviceDesc = grpc.ServiceDesc{
Metadata: "pointerdb.proto",
}
func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_pointerdb_526f1300cf6003a1) }
func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_pointerdb_21b4d7ef3abc5ac1) }
var fileDescriptor_pointerdb_526f1300cf6003a1 = []byte{
// 1068 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x6e, 0x1b, 0x45,
0x14, 0xae, 0xff, 0xe3, 0xb3, 0x76, 0x6a, 0x46, 0x25, 0xdd, 0xba, 0x45, 0x09, 0x5b, 0x81, 0x4a,
0x5b, 0x6d, 0xc1, 0x54, 0x42, 0xa2, 0x20, 0xd4, 0x34, 0xa1, 0xb2, 0xd4, 0x06, 0x6b, 0x9c, 0x2b,
0x6e, 0x96, 0xb1, 0xf7, 0xd8, 0x1e, 0xb1, 0x7f, 0x9d, 0x99, 0x2d, 0x4d, 0xde, 0x84, 0x37, 0xe1,
0x86, 0x4b, 0xde, 0x04, 0xde, 0x82, 0x0b, 0xb4, 0x33, 0xb3, 0xf6, 0xba, 0x21, 0x69, 0x05, 0x37,
0xc9, 0x9e, 0x6f, 0xbe, 0x73, 0x66, 0xce, 0xf9, 0xbe, 0x19, 0xc3, 0xf5, 0x2c, 0xe5, 0x89, 0x42,
0x11, 0xce, 0xfc, 0x4c, 0xa4, 0x2a, 0x25, 0xdd, 0x35, 0x30, 0xdc, 0x5f, 0xa6, 0xe9, 0x32, 0xc2,
0x47, 0x7a, 0x61, 0x96, 0x2f, 0x1e, 0x29, 0x1e, 0xa3, 0x54, 0x2c, 0xce, 0x0c, 0x77, 0xd8, 0x4f,
0x5f, 0xa3, 0x88, 0xd8, 0x99, 0x0d, 0x07, 0x19, 0xc7, 0x39, 0x4a, 0x95, 0x0a, 0x34, 0x88, 0xf7,
0x6b, 0x1d, 0x06, 0x14, 0xc3, 0x3c, 0x09, 0x59, 0x32, 0x3f, 0x9b, 0xce, 0x57, 0x18, 0x23, 0xf9,
0x1a, 0x9a, 0xea, 0x2c, 0x43, 0xb7, 0x76, 0x50, 0xbb, 0xb7, 0x3b, 0xfa, 0xd4, 0xdf, 0x9c, 0xe0,
0x6d, 0xaa, 0x6f, 0xfe, 0x9d, 0x9e, 0x65, 0x48, 0x75, 0x0e, 0xb9, 0x09, 0x9d, 0x98, 0x27, 0x81,
0xc0, 0x57, 0x6e, 0xfd, 0xa0, 0x76, 0xaf, 0x45, 0xdb, 0x31, 0x4f, 0x28, 0xbe, 0x22, 0x37, 0xa0,
0xa5, 0x52, 0xc5, 0x22, 0xb7, 0xa1, 0x61, 0x13, 0x90, 0xcf, 0x60, 0x20, 0x30, 0x63, 0x5c, 0x04,
0x6a, 0x25, 0x50, 0xae, 0xd2, 0x28, 0x74, 0x9b, 0x9a, 0x70, 0xdd, 0xe0, 0xa7, 0x25, 0x4c, 0x1e,
0xc0, 0x07, 0x32, 0x9f, 0xcf, 0x51, 0xca, 0x0a, 0xb7, 0xa5, 0xb9, 0x03, 0xbb, 0xb0, 0x21, 0x3f,
0x04, 0x82, 0x82, 0xc9, 0x5c, 0x60, 0x20, 0x57, 0xac, 0xf8, 0xcb, 0xcf, 0xd1, 0x6d, 0x1b, 0xb6,
0x5d, 0x99, 0x16, 0x0b, 0x53, 0x7e, 0x8e, 0xde, 0x0d, 0x80, 0x4d, 0x23, 0xa4, 0x0d, 0x75, 0x3a,
0x1d, 0x5c, 0xf3, 0x9e, 0x81, 0x43, 0x31, 0x4e, 0x15, 0x4e, 0x8a, 0xa9, 0x91, 0xdb, 0xd0, 0xd5,
0xe3, 0x0b, 0x92, 0x3c, 0xd6, 0xa3, 0x69, 0xd1, 0x1d, 0x0d, 0x9c, 0xe4, 0x71, 0xd1, 0x76, 0x92,
0x86, 0x18, 0xf0, 0x50, 0xb7, 0xdd, 0xa5, 0xed, 0x22, 0x1c, 0x87, 0xde, 0x1f, 0x35, 0xe8, 0x9b,
0x2a, 0x53, 0x5c, 0xc6, 0x98, 0x28, 0xf2, 0x04, 0x40, 0xac, 0xc7, 0xa8, 0x0b, 0x39, 0xa3, 0xdb,
0x57, 0xcc, 0x98, 0x56, 0xe8, 0xe4, 0x16, 0x98, 0x3d, 0x37, 0x1b, 0x75, 0x74, 0x3c, 0x0e, 0xc9,
0x13, 0xe8, 0x0b, 0xbd, 0x51, 0x60, 0x54, 0x76, 0x1b, 0x07, 0x8d, 0x7b, 0xce, 0x68, 0x6f, 0xab,
0xf4, 0xba, 0x1d, 0xda, 0x13, 0x9b, 0x40, 0x92, 0x7d, 0x70, 0x62, 0x14, 0x3f, 0x47, 0x18, 0x88,
0x34, 0x55, 0x5a, 0x82, 0x1e, 0x05, 0x03, 0xd1, 0x34, 0x55, 0xde, 0xdf, 0x75, 0xe8, 0x4c, 0x4c,
0x21, 0xf2, 0x68, 0xcb, 0x1f, 0xd5, 0xb3, 0x5b, 0x86, 0x7f, 0xc4, 0x14, 0xab, 0x98, 0xe2, 0x13,
0xd8, 0xe5, 0x49, 0xc4, 0x13, 0x0c, 0xa4, 0x19, 0x82, 0x36, 0x41, 0x8f, 0xf6, 0x0d, 0x5a, 0x4e,
0xe6, 0x73, 0x68, 0x9b, 0x43, 0xe9, 0xfd, 0x9d, 0x91, 0x7b, 0xe1, 0xe8, 0x96, 0x49, 0x2d, 0x8f,
0x7c, 0x0c, 0x3d, 0x5b, 0xd1, 0x08, 0x5c, 0xd8, 0xa1, 0x41, 0x1d, 0x8b, 0x15, 0xda, 0x92, 0xef,
0xa0, 0x3f, 0x17, 0xc8, 0x14, 0x4f, 0x93, 0x20, 0x64, 0xca, 0x98, 0xc0, 0x19, 0x0d, 0x7d, 0x73,
0x77, 0xfc, 0xf2, 0xee, 0xf8, 0xa7, 0xe5, 0xdd, 0xa1, 0xbd, 0x32, 0xe1, 0x88, 0x29, 0x24, 0xcf,
0xe0, 0x3a, 0xbe, 0xc9, 0xb8, 0xa8, 0x94, 0xe8, 0xbc, 0xb3, 0xc4, 0xee, 0x26, 0x45, 0x17, 0x19,
0xc2, 0x4e, 0x8c, 0x8a, 0x85, 0x4c, 0x31, 0x77, 0x47, 0xf7, 0xbe, 0x8e, 0x3d, 0x0f, 0x76, 0xca,
0x79, 0x11, 0x80, 0xf6, 0xf8, 0xe4, 0xc5, 0xf8, 0xe4, 0x78, 0x70, 0xad, 0xf8, 0xa6, 0xc7, 0x2f,
0x7f, 0x38, 0x3d, 0x1e, 0xd4, 0xbc, 0x13, 0x80, 0x49, 0xae, 0x28, 0xbe, 0xca, 0x51, 0x2a, 0x42,
0xa0, 0x99, 0x31, 0xb5, 0xd2, 0x02, 0x74, 0xa9, 0xfe, 0x26, 0x0f, 0xa1, 0x63, 0xa7, 0xa5, 0x8d,
0xe1, 0x8c, 0xc8, 0x45, 0x5d, 0x68, 0x49, 0xf1, 0x0e, 0x00, 0x9e, 0xe3, 0x55, 0xf5, 0xbc, 0xdf,
0x6a, 0xe0, 0xbc, 0xe0, 0x72, 0xcd, 0xd9, 0x83, 0x76, 0x26, 0x70, 0xc1, 0xdf, 0x58, 0x96, 0x8d,
0x0a, 0xe7, 0x48, 0xc5, 0x84, 0x0a, 0xd8, 0xa2, 0xdc, 0xbb, 0x4b, 0x41, 0x43, 0x4f, 0x0b, 0x84,
0x7c, 0x04, 0x80, 0x49, 0x18, 0xcc, 0x70, 0x91, 0x0a, 0xd4, 0xc2, 0x77, 0x69, 0x17, 0x93, 0xf0,
0x50, 0x03, 0xe4, 0x0e, 0x74, 0x05, 0xce, 0x73, 0x21, 0xf9, 0x6b, 0xa3, 0xfb, 0x0e, 0xdd, 0x00,
0xc5, 0xab, 0x11, 0xf1, 0x98, 0x2b, 0x7b, 0xd1, 0x4d, 0x50, 0x94, 0x2c, 0xa6, 0x17, 0x2c, 0x22,
0xb6, 0x94, 0x5a, 0xd0, 0x0e, 0xed, 0x16, 0xc8, 0xf7, 0x05, 0xe0, 0xf5, 0xc1, 0xd1, 0xc3, 0x92,
0x59, 0x9a, 0x48, 0xf4, 0xfe, 0xaa, 0x81, 0xa3, 0x9b, 0x35, 0x71, 0x75, 0x52, 0xb5, 0x77, 0x4e,
0x8a, 0xdc, 0x85, 0x56, 0x71, 0x95, 0xa5, 0x5b, 0xd7, 0xd7, 0xa9, 0xef, 0x97, 0x4f, 0xea, 0x49,
0x1a, 0x22, 0x35, 0x6b, 0xe4, 0x1b, 0x68, 0x64, 0x33, 0xa6, 0x9b, 0x73, 0x46, 0xf7, 0xfd, 0xcd,
0x33, 0x2b, 0xd2, 0x5c, 0xa1, 0xf4, 0x27, 0xec, 0x0c, 0xc5, 0x21, 0x4b, 0xc2, 0x5f, 0x78, 0xa8,
0x56, 0x4f, 0xa3, 0x28, 0x9d, 0x6b, 0x6f, 0xd0, 0x22, 0x8d, 0x1c, 0x43, 0x9f, 0xe5, 0x6a, 0x95,
0x0a, 0x7e, 0xae, 0x51, 0x6b, 0xff, 0xfd, 0x8b, 0x75, 0xa6, 0x7c, 0x99, 0x60, 0xf8, 0x12, 0xa5,
0x64, 0x4b, 0xa4, 0xdb, 0x59, 0xde, 0xef, 0x35, 0xe8, 0x19, 0xc5, 0x6c, 0xa3, 0x23, 0x68, 0x71,
0x85, 0xb1, 0x74, 0x6b, 0xfa, 0xe8, 0x77, 0x2a, 0x6d, 0x56, 0x79, 0xfe, 0x58, 0x61, 0x4c, 0x0d,
0xb5, 0xb0, 0x42, 0x5c, 0xe8, 0x54, 0xd7, 0x4a, 0xe8, 0xef, 0x21, 0x42, 0xb3, 0xa0, 0xfc, 0x7f,
0xdb, 0x15, 0x6f, 0x28, 0x97, 0x81, 0xf5, 0x51, 0x43, 0x6f, 0xb1, 0xc3, 0xe5, 0x44, 0xc7, 0xde,
0x5d, 0xe8, 0x1f, 0x61, 0x84, 0x0a, 0xaf, 0xb2, 0xe5, 0x00, 0x76, 0x4b, 0x92, 0x95, 0x57, 0xc0,
0xee, 0x58, 0xa1, 0x60, 0x9b, 0xbc, 0xcb, 0xac, 0x7a, 0x03, 0x5a, 0x0b, 0x2e, 0xa4, 0xb2, 0x26,
0x35, 0x01, 0x71, 0xa1, 0x63, 0xfc, 0x86, 0xf6, 0x44, 0x65, 0x68, 0x56, 0x5e, 0x63, 0xb1, 0xd2,
0x2c, 0x57, 0x74, 0xe8, 0x45, 0xb0, 0x7f, 0xa9, 0xa4, 0xf6, 0x10, 0x63, 0x68, 0xb3, 0xb9, 0x56,
0xd3, 0x3c, 0x93, 0x5f, 0xbc, 0xbf, 0x2b, 0xfc, 0xa7, 0x3a, 0x91, 0xda, 0x02, 0xde, 0x4f, 0x70,
0x70, 0xf9, 0x6e, 0x56, 0x6b, 0xeb, 0xc0, 0xda, 0x7f, 0x72, 0xe0, 0xe8, 0xcf, 0x3a, 0x74, 0xad,
0x58, 0x47, 0x87, 0xe4, 0x31, 0x34, 0x26, 0xb9, 0x22, 0x1f, 0x56, 0x95, 0x5c, 0x3f, 0x3e, 0xc3,
0xbd, 0xb7, 0x61, 0x7b, 0x82, 0xc7, 0xd0, 0x78, 0x8e, 0xdb, 0x59, 0x9b, 0x27, 0x66, 0x2b, 0xab,
0x7a, 0x19, 0xbf, 0x82, 0x66, 0xe1, 0x45, 0xb2, 0x77, 0xc1, 0x9c, 0x26, 0xef, 0xe6, 0x25, 0xa6,
0x25, 0xdf, 0x42, 0xdb, 0x18, 0x81, 0x54, 0x7f, 0x26, 0xb6, 0x0c, 0x34, 0xbc, 0xf5, 0x2f, 0x2b,
0x36, 0x5d, 0x82, 0x7b, 0xd9, 0x48, 0xc8, 0xfd, 0x6a, 0x87, 0x57, 0xcb, 0x3c, 0x7c, 0xf0, 0x5e,
0x5c, 0xb3, 0xe9, 0x61, 0xf3, 0xc7, 0x7a, 0x36, 0x9b, 0xb5, 0xf5, 0xef, 0xc5, 0x97, 0xff, 0x04,
0x00, 0x00, 0xff, 0xff, 0xef, 0xc4, 0x3f, 0xf3, 0xda, 0x09, 0x00, 0x00,
var fileDescriptor_pointerdb_21b4d7ef3abc5ac1 = []byte{
// 1092 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6f, 0x1b, 0x45,
0x17, 0xae, 0xbf, 0xe3, 0xb3, 0x76, 0xea, 0x77, 0xd4, 0x37, 0xdd, 0xba, 0x45, 0x09, 0x8b, 0x80,
0xd2, 0x56, 0x5b, 0x30, 0x95, 0x90, 0x28, 0x08, 0x35, 0x24, 0x54, 0x96, 0xda, 0x10, 0x8d, 0x73,
0xc5, 0xcd, 0x32, 0xf1, 0x1e, 0xdb, 0x23, 0xbc, 0x1f, 0x9d, 0x99, 0x2d, 0x4d, 0xff, 0x09, 0xff,
0x84, 0x1b, 0x2e, 0x91, 0xf8, 0x0d, 0x5c, 0xf4, 0x02, 0xf1, 0x33, 0xb8, 0x40, 0xf3, 0xb1, 0xf6,
0xba, 0x21, 0x69, 0x05, 0x37, 0xc9, 0x9e, 0x73, 0x9e, 0x73, 0xe6, 0x9c, 0xf3, 0x3c, 0x33, 0x86,
0xab, 0x79, 0xc6, 0x53, 0x85, 0x22, 0x3e, 0x0d, 0x73, 0x91, 0xa9, 0x8c, 0x74, 0x57, 0x8e, 0xe1,
0xee, 0x3c, 0xcb, 0xe6, 0x4b, 0xbc, 0x6f, 0x02, 0xa7, 0xc5, 0xec, 0xbe, 0xe2, 0x09, 0x4a, 0xc5,
0x92, 0xdc, 0x62, 0x87, 0x30, 0xcf, 0xe6, 0x59, 0xf9, 0x9d, 0x66, 0x31, 0xba, 0xef, 0x41, 0xce,
0x71, 0x8a, 0x52, 0x65, 0xc2, 0x79, 0x82, 0x9f, 0xea, 0x30, 0xa0, 0x18, 0x17, 0x69, 0xcc, 0xd2,
0xe9, 0xd9, 0x64, 0xba, 0xc0, 0x04, 0xc9, 0xe7, 0xd0, 0x54, 0x67, 0x39, 0xfa, 0xb5, 0xbd, 0xda,
0xed, 0xed, 0xd1, 0x07, 0xe1, 0xba, 0x95, 0xd7, 0xa1, 0xa1, 0xfd, 0x77, 0x72, 0x96, 0x23, 0x35,
0x39, 0xe4, 0x3a, 0x74, 0x12, 0x9e, 0x46, 0x02, 0x9f, 0xf9, 0xf5, 0xbd, 0xda, 0xed, 0x16, 0x6d,
0x27, 0x3c, 0xa5, 0xf8, 0x8c, 0x5c, 0x83, 0x96, 0xca, 0x14, 0x5b, 0xfa, 0x0d, 0xe3, 0xb6, 0x06,
0xf9, 0x08, 0x06, 0x02, 0x73, 0xc6, 0x45, 0xa4, 0x16, 0x02, 0xe5, 0x22, 0x5b, 0xc6, 0x7e, 0xd3,
0x00, 0xae, 0x5a, 0xff, 0x49, 0xe9, 0x26, 0x77, 0xe1, 0x7f, 0xb2, 0x98, 0x4e, 0x51, 0xca, 0x0a,
0xb6, 0x65, 0xb0, 0x03, 0x17, 0x58, 0x83, 0xef, 0x01, 0x41, 0xc1, 0x64, 0x21, 0x30, 0x92, 0x0b,
0xa6, 0xff, 0xf2, 0x97, 0xe8, 0xb7, 0x2d, 0xda, 0x45, 0x26, 0x3a, 0x30, 0xe1, 0x2f, 0x31, 0xb8,
0x06, 0xb0, 0x1e, 0x84, 0xb4, 0xa1, 0x4e, 0x27, 0x83, 0x2b, 0xc1, 0x04, 0x3c, 0x8a, 0x49, 0xa6,
0xf0, 0x58, 0x6f, 0x8d, 0xdc, 0x84, 0xae, 0x59, 0x5f, 0x94, 0x16, 0x89, 0x59, 0x4d, 0x8b, 0x6e,
0x19, 0xc7, 0x51, 0x91, 0x90, 0x0f, 0xa1, 0xa3, 0xf7, 0x1c, 0xf1, 0xd8, 0x8c, 0xdd, 0xdb, 0xdf,
0xfe, 0xed, 0xd5, 0xee, 0x95, 0xdf, 0x5f, 0xed, 0xb6, 0x8f, 0xb2, 0x18, 0xc7, 0x07, 0xb4, 0xad,
0xc3, 0xe3, 0x38, 0xf8, 0xb5, 0x06, 0x7d, 0x5b, 0x75, 0x82, 0xf3, 0x04, 0x53, 0x45, 0x1e, 0x02,
0x88, 0xd5, 0x5a, 0x4d, 0x61, 0x6f, 0x74, 0xf3, 0x92, 0x9d, 0xd3, 0x0a, 0x9c, 0xdc, 0x00, 0xdb,
0x43, 0x79, 0x70, 0x97, 0x76, 0x8c, 0x3d, 0x8e, 0xc9, 0x43, 0xe8, 0x0b, 0x73, 0x50, 0x64, 0x59,
0xf7, 0x1b, 0x7b, 0x8d, 0xdb, 0xde, 0x68, 0x67, 0xa3, 0xf4, 0x6a, 0x3c, 0xda, 0x13, 0x6b, 0x43,
0x92, 0x5d, 0xf0, 0x12, 0x14, 0x3f, 0x2c, 0x31, 0x12, 0x59, 0xa6, 0x0c, 0x25, 0x3d, 0x0a, 0xd6,
0x45, 0xb3, 0x4c, 0x05, 0x7f, 0xd5, 0xa1, 0x73, 0x6c, 0x0b, 0x91, 0xfb, 0x1b, 0x7a, 0xa9, 0xf6,
0xee, 0x10, 0xe1, 0x01, 0x53, 0xac, 0x22, 0x92, 0xf7, 0x61, 0x9b, 0xa7, 0x4b, 0x9e, 0x62, 0x24,
0xed, 0x12, 0x8c, 0x28, 0x7a, 0xb4, 0x6f, 0xbd, 0xe5, 0x66, 0x3e, 0x86, 0xb6, 0x6d, 0xca, 0x9c,
0xef, 0x8d, 0xfc, 0x73, 0xad, 0x3b, 0x24, 0x75, 0x38, 0xf2, 0x2e, 0xf4, 0x5c, 0x45, 0x4b, 0xb8,
0x96, 0x47, 0x83, 0x7a, 0xce, 0xa7, 0xb9, 0x26, 0x5f, 0x41, 0x7f, 0x2a, 0x90, 0x29, 0x9e, 0xa5,
0x51, 0xcc, 0x94, 0x15, 0x85, 0x37, 0x1a, 0x86, 0xf6, 0x52, 0x85, 0xe5, 0xa5, 0x0a, 0x4f, 0xca,
0x4b, 0x45, 0x7b, 0x65, 0xc2, 0x01, 0x53, 0x48, 0xbe, 0x86, 0xab, 0xf8, 0x22, 0xe7, 0xa2, 0x52,
0xa2, 0xf3, 0xc6, 0x12, 0xdb, 0xeb, 0x14, 0x53, 0x64, 0x08, 0x5b, 0x09, 0x2a, 0x16, 0x33, 0xc5,
0xfc, 0x2d, 0x33, 0xfb, 0xca, 0x0e, 0x02, 0xd8, 0x2a, 0xf7, 0x45, 0x00, 0xda, 0xe3, 0xa3, 0x27,
0xe3, 0xa3, 0xc3, 0xc1, 0x15, 0xfd, 0x4d, 0x0f, 0x9f, 0x7e, 0x7b, 0x72, 0x38, 0xa8, 0x05, 0x47,
0x00, 0xc7, 0x85, 0xa2, 0xf8, 0xac, 0x40, 0xa9, 0x08, 0x81, 0x66, 0xce, 0xd4, 0xc2, 0x10, 0xd0,
0xa5, 0xe6, 0x9b, 0xdc, 0x83, 0x8e, 0xdb, 0x96, 0x11, 0x86, 0x37, 0x22, 0xe7, 0x79, 0xa1, 0x25,
0x24, 0xd8, 0x03, 0x78, 0x8c, 0x97, 0xd5, 0x0b, 0x7e, 0xae, 0x81, 0xf7, 0x84, 0xcb, 0x15, 0x66,
0x07, 0xda, 0xb9, 0xc0, 0x19, 0x7f, 0xe1, 0x50, 0xce, 0xd2, 0xca, 0x91, 0x8a, 0x09, 0x15, 0xb1,
0x59, 0x79, 0x76, 0x97, 0x82, 0x71, 0x3d, 0xd2, 0x1e, 0xf2, 0x0e, 0x00, 0xa6, 0x71, 0x74, 0x8a,
0xb3, 0x4c, 0xa0, 0x21, 0xbe, 0x4b, 0xbb, 0x98, 0xc6, 0xfb, 0xc6, 0x41, 0x6e, 0x41, 0x57, 0xe0,
0xb4, 0x10, 0x92, 0x3f, 0xb7, 0xbc, 0x6f, 0xd1, 0xb5, 0x43, 0xbf, 0x22, 0x4b, 0x9e, 0x70, 0xe5,
0x2e, 0xbe, 0x35, 0x74, 0x49, 0xbd, 0xbd, 0x68, 0xb6, 0x64, 0x73, 0x69, 0x08, 0xed, 0xd0, 0xae,
0xf6, 0x7c, 0xa3, 0x1d, 0x41, 0x1f, 0x3c, 0xb3, 0x2c, 0x99, 0x67, 0xa9, 0xc4, 0xe0, 0x8f, 0x1a,
0x78, 0x66, 0x58, 0x6b, 0x57, 0x37, 0x55, 0x7b, 0xe3, 0xa6, 0xc8, 0x1e, 0xb4, 0xf4, 0x55, 0x96,
0x7e, 0xdd, 0x5c, 0x27, 0x08, 0xcd, 0xfb, 0xaa, 0x6f, 0x39, 0xb5, 0x01, 0xf2, 0x05, 0x34, 0xf2,
0x53, 0x66, 0x26, 0xf3, 0x46, 0x77, 0xc2, 0xf5, 0x9b, 0x2b, 0xb2, 0x42, 0xa1, 0x0c, 0x8f, 0xd9,
0x19, 0x8a, 0x7d, 0x96, 0xc6, 0x3f, 0xf2, 0x58, 0x2d, 0x1e, 0x2d, 0x97, 0xd9, 0xd4, 0x08, 0x83,
0xea, 0x34, 0x72, 0x08, 0x7d, 0x56, 0xa8, 0x45, 0x26, 0xf8, 0x4b, 0xe3, 0x75, 0xda, 0xdf, 0x3d,
0x5f, 0x67, 0xc2, 0xe7, 0x29, 0xc6, 0x4f, 0x51, 0x4a, 0x36, 0x47, 0xba, 0x99, 0x15, 0xfc, 0x52,
0x83, 0x9e, 0xa5, 0xcb, 0x4d, 0x39, 0x82, 0x16, 0x57, 0x98, 0x48, 0xbf, 0x66, 0xfa, 0xbe, 0x55,
0x99, 0xb1, 0x8a, 0x0b, 0xc7, 0x0a, 0x13, 0x6a, 0xa1, 0x5a, 0x07, 0x89, 0x26, 0xa9, 0x6e, 0x68,
0x30, 0xdf, 0x43, 0x84, 0xa6, 0x86, 0xfc, 0x77, 0xcd, 0xe9, 0x07, 0x95, 0xcb, 0xc8, 0x89, 0xa8,
0x61, 0x8e, 0xd8, 0xe2, 0xf2, 0xd8, 0xd8, 0xc1, 0x7b, 0xd0, 0x3f, 0xc0, 0x25, 0x2a, 0xbc, 0x4c,
0x93, 0x03, 0xd8, 0x2e, 0x41, 0x8e, 0x5b, 0x01, 0xdb, 0x63, 0x85, 0x82, 0xad, 0xf3, 0x2e, 0xd2,
0xe9, 0x35, 0x68, 0xcd, 0xb8, 0x90, 0xca, 0x29, 0xd4, 0x1a, 0xc4, 0x87, 0x8e, 0x15, 0x1b, 0xba,
0x8e, 0x4a, 0xd3, 0x46, 0x9e, 0xa3, 0x8e, 0x34, 0xcb, 0x88, 0x31, 0x83, 0x25, 0xec, 0x5e, 0x48,
0xa9, 0x6b, 0x62, 0x0c, 0x6d, 0x36, 0x35, 0x6c, 0xda, 0x37, 0xf2, 0x93, 0xb7, 0x57, 0x45, 0xf8,
0xc8, 0x24, 0x52, 0x57, 0x20, 0xf8, 0x1e, 0xf6, 0x2e, 0x3e, 0xcd, 0x71, 0xed, 0x14, 0x58, 0xfb,
0x57, 0x0a, 0x1c, 0xfd, 0x59, 0x87, 0xae, 0x23, 0xeb, 0x60, 0x9f, 0x3c, 0x80, 0xc6, 0x71, 0xa1,
0xc8, 0xff, 0xab, 0x4c, 0xae, 0x5e, 0x9e, 0xe1, 0xce, 0xeb, 0x6e, 0xd7, 0xc1, 0x03, 0x68, 0x3c,
0xc6, 0xcd, 0xac, 0xf5, 0xfb, 0xb2, 0x91, 0x55, 0xbd, 0x89, 0x9f, 0x41, 0x53, 0x6b, 0x91, 0xec,
0x9c, 0x13, 0xa7, 0xcd, 0xbb, 0x7e, 0x81, 0x68, 0xc9, 0x97, 0xd0, 0xb6, 0x42, 0x20, 0xd5, 0xdf,
0x88, 0x0d, 0x01, 0x0d, 0x6f, 0xfc, 0x43, 0xc4, 0xa5, 0x4b, 0xf0, 0x2f, 0x5a, 0x09, 0xb9, 0x53,
0x9d, 0xf0, 0x72, 0x9a, 0x87, 0x77, 0xdf, 0x0a, 0x6b, 0x0f, 0xdd, 0x6f, 0x7e, 0x57, 0xcf, 0x4f,
0x4f, 0xdb, 0xe6, 0xc7, 0xe2, 0xd3, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfa, 0x56, 0x48, 0xf9,
0xf0, 0x09, 0x00, 0x00,
}

View File

@ -7,7 +7,8 @@ option go_package = "pb";
package pointerdb;
import "google/protobuf/timestamp.proto";
import "overlay.proto";
import "gogo.proto";
import "node.proto";
import "piecestore.proto";
// PointerDB defines the interface for interacting with the network state persistence layer
@ -41,11 +42,12 @@ message RedundancyScheme {
message RemotePiece {
int32 piece_num = 1;
string node_id = 2;
bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
message RemoteSegment {
RedundancyScheme redundancy = 1;
// TODO: may want to use customtype and fixed-length byte slice
string piece_id = 2;
repeated RemotePiece remote_pieces = 3;
@ -62,6 +64,7 @@ message Pointer {
bytes inline_segment = 3;
RemoteSegment remote = 4;
// TODO: rename
int64 segment_size = 5;
google.protobuf.Timestamp creation_date = 6;
@ -98,7 +101,7 @@ message PutResponse {
// GetResponse is a response message for the Get rpc call
message GetResponse {
Pointer pointer = 1;
repeated overlay.Node nodes = 2;
repeated node.Node nodes = 2;
piecestoreroutes.PayerBandwidthAllocation pba = 3;
piecestoreroutes.SignedMessage authorization = 4;
}

63
pkg/pb/utils.go Normal file
View File

@ -0,0 +1,63 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package pb
import "storj.io/storj/pkg/storj"
// NodeIDsToLookupRequests ...
func NodeIDsToLookupRequests(nodeIDs storj.NodeIDList) *LookupRequests {
var rq []*LookupRequest
for _, v := range nodeIDs {
r := &LookupRequest{NodeId: v}
rq = append(rq, r)
}
return &LookupRequests{LookupRequest: rq}
}
// LookupResponsesToNodes ...
func LookupResponsesToNodes(responses *LookupResponses) []*Node {
var nodes []*Node
for _, v := range responses.LookupResponse {
n := v.Node
nodes = append(nodes, n)
}
return nodes
}
// CopyNode returns a deep copy of a node
// It would be better to use `proto.Clone` but it is curently incompatible
// with gogo's customtype extension.
// (see https://github.com/gogo/protobuf/issues/147)
func CopyNode(src *Node) (dst *Node) {
node := Node{Id: storj.NodeID{}}
copy(node.Id[:], src.Id[:])
if src.Address != nil {
node.Address = &NodeAddress{
Transport: src.Address.Transport,
Address: src.Address.Address,
}
}
if src.Metadata != nil {
node.Metadata = &NodeMetadata{
Email: src.Metadata.Email,
Wallet: src.Metadata.Wallet,
}
}
if src.Restrictions != nil {
node.Restrictions = &NodeRestrictions{
FreeBandwidth: src.Restrictions.FreeBandwidth,
FreeDisk: src.Restrictions.FreeDisk,
}
}
node.AuditSuccess = src.AuditSuccess
node.IsUp = src.IsUp
node.LatencyList = src.LatencyList
node.Type = src.Type
node.UpdateAuditSuccess = src.UpdateAuditSuccess
node.UpdateLatency = src.UpdateLatency
node.UpdateUptime = src.UpdateUptime
return &node
}

View File

@ -18,9 +18,9 @@ import (
"go.uber.org/zap"
"golang.org/x/net/context"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/ranger"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
)
@ -52,7 +52,7 @@ type PieceStore struct {
client pb.PieceStoreRoutesClient // PieceStore for interacting with Storage Node
prikey crypto.PrivateKey // Uplink private key
bandwidthMsgSize int // max bandwidth message size in bytes
nodeID *node.ID // Storage node being connected to
nodeID storj.NodeID // Storage node being connected to
}
// NewPSClient initilizes a piecestore client
@ -75,7 +75,7 @@ func NewPSClient(ctx context.Context, tc transport.Client, n *pb.Node, bandwidth
client: pb.NewPieceStoreRoutesClient(conn),
bandwidthMsgSize: bandwidthMsgSize,
prikey: tc.Identity().Key,
nodeID: node.IDFromString(n.GetId()),
nodeID: n.Id,
}, nil
}
@ -93,7 +93,7 @@ func NewCustomRoute(client pb.PieceStoreRoutesClient, target *pb.Node, bandwidth
client: client,
bandwidthMsgSize: bandwidthMsgSize,
prikey: prikey,
nodeID: node.IDFromString(target.GetId()),
nodeID: target.Id,
}, nil
}

View File

@ -4,15 +4,12 @@
package psclient
import (
"context"
"testing"
"github.com/mr-tron/base58/base58"
"github.com/stretchr/testify/assert"
"storj.io/storj/pkg/dht"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/provider"
"storj.io/storj/internal/identity"
)
func TestNewPieceID(t *testing.T) {
@ -30,26 +27,16 @@ func TestNewPieceID(t *testing.T) {
func TestDerivePieceID(t *testing.T) {
pid := NewPieceID()
fid, err := newTestIdentity()
fid, err := testidentity.NewTestIdentity()
assert.NoError(t, err)
nid := dht.NodeID(fid.ID)
did, err := pid.Derive(nid.Bytes())
did, err := pid.Derive(fid.ID.Bytes())
assert.NoError(t, err)
assert.NotEqual(t, pid, did)
did2, err := pid.Derive(nid.Bytes())
did2, err := pid.Derive(fid.ID.Bytes())
assert.NoError(t, err)
assert.Equal(t, did, did2)
_, err = base58.Decode(did.String())
assert.NoError(t, err)
}
// helper function to generate new node identities with
// correct difficulty and concurrency
func newTestIdentity() (*provider.FullIdentity, error) {
ctx := context.Background()
fid, err := node.NewFullIdentity(ctx, 12, 4)
return fid, err
}

View File

@ -16,6 +16,8 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/pb"
)
@ -80,7 +82,7 @@ func TestPieceRanger(t *testing.T) {
Address: "",
Transport: 0,
},
Id: "test-node-id-1234567",
Id: teststorj.NodeIDFromString("test-node-id-1234567"),
}
c, err := NewCustomRoute(route, target, 32*1024, priv)
assert.NoError(t, err)
@ -158,7 +160,7 @@ func TestPieceRangerSize(t *testing.T) {
Address: "",
Transport: 0,
},
Id: "test-node-id-1234567",
Id: teststorj.NodeIDFromString("test-node-id-1234567"),
}
c, err := NewCustomRoute(route, target, 32*1024, priv)
assert.NoError(t, err)

View File

@ -41,7 +41,7 @@ func (s *StreamWriter) Write(b []byte) (int, error) {
allocationData := &pb.RenterBandwidthAllocation_Data{
PayerAllocation: s.pba,
Total: updatedAllocation,
StorageNodeId: s.signer.nodeID.Bytes(),
StorageNodeId: s.signer.nodeID,
PubKey: pubbytes, // TODO: Take this out. It will be kept in a database on the satellite
}
@ -135,7 +135,7 @@ func NewStreamReader(client *PieceStore, stream pb.PieceStoreRoutes_RetrieveClie
allocationData := &pb.RenterBandwidthAllocation_Data{
PayerAllocation: pba,
Total: sr.allocated + allocate,
StorageNodeId: sr.client.nodeID.Bytes(),
StorageNodeId: sr.client.nodeID,
PubKey: pubbytes, // TODO: Take this out. It will be kept in a database on the satellite
}

View File

@ -12,7 +12,8 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"storj.io/storj/pkg/node"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore/psserver/psdb"
@ -51,7 +52,7 @@ func (as *AgreementSender) Run(ctx context.Context) error {
zap.S().Info("AgreementSender is starting up")
type agreementGroup struct {
satellite string
satellite storj.NodeID
agreements []*psdb.Agreement
}
@ -83,14 +84,14 @@ func (as *AgreementSender) Run(ctx context.Context) error {
zap.S().Infof("Sending %v agreements to satellite %s\n", len(agreementGroup.agreements), agreementGroup.satellite)
// Get satellite ip from overlay by Lookup agreementGroup.satellite
satellite, err := as.overlay.Lookup(ctx, node.IDFromString(agreementGroup.satellite))
satellite, err := as.overlay.Lookup(ctx, agreementGroup.satellite)
if err != nil {
zap.S().Error(err)
return
}
// Create client from satellite ip
identOpt, err := as.identity.DialOption("")
identOpt, err := as.identity.DialOption(storj.NodeID{})
if err != nil {
zap.S().Error(err)
return

View File

@ -19,6 +19,8 @@ import (
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/utils"
@ -105,7 +107,7 @@ func (db *DB) init() (err error) {
return err
}
_, err = tx.Exec("CREATE TABLE IF NOT EXISTS `bandwidth_agreements` (`satellite` TEXT, `agreement` BLOB, `signature` BLOB);")
_, err = tx.Exec("CREATE TABLE IF NOT EXISTS `bandwidth_agreements` (`satellite` BLOB, `agreement` BLOB, `signature` BLOB);")
if err != nil {
return err
}
@ -223,7 +225,7 @@ func (db *DB) WriteBandwidthAllocToDB(ba *pb.RenterBandwidthAllocation) error {
return err
}
_, err := db.DB.Exec(`INSERT INTO bandwidth_agreements (satellite, agreement, signature) VALUES (?, ?, ?)`, pbad.GetSatelliteId(), ba.GetData(), ba.GetSignature())
_, err := db.DB.Exec(`INSERT INTO bandwidth_agreements (satellite, agreement, signature) VALUES (?, ?, ?)`, pbad.SatelliteId.Bytes(), ba.GetData(), ba.GetSignature())
return err
}
@ -266,7 +268,7 @@ func (db *DB) GetBandwidthAllocationBySignature(signature []byte) ([][]byte, err
}
// GetBandwidthAllocations all bandwidth agreements and sorts by satellite
func (db *DB) GetBandwidthAllocations() (map[string][]*Agreement, error) {
func (db *DB) GetBandwidthAllocations() (map[storj.NodeID][]*Agreement, error) {
defer db.locked()()
rows, err := db.DB.Query(`SELECT * FROM bandwidth_agreements ORDER BY satellite`)
@ -279,19 +281,23 @@ func (db *DB) GetBandwidthAllocations() (map[string][]*Agreement, error) {
}
}()
agreements := make(map[string][]*Agreement)
agreements := make(map[storj.NodeID][]*Agreement)
for rows.Next() {
agreement := &Agreement{}
var satellite sql.NullString
var satellite []byte
err := rows.Scan(&satellite, &agreement.Agreement, &agreement.Signature)
if err != nil {
return agreements, err
}
if !satellite.Valid {
return agreements, nil
// if !satellite.Valid {
// return agreements, nil
// }
satelliteID, err := storj.NodeIDFromBytes(satellite)
if err != nil {
return nil, err
}
agreements[satellite.String] = append(agreements[satellite.String], agreement)
agreements[satelliteID] = append(agreements[satelliteID], agreement)
}
return agreements, nil
}

View File

@ -16,6 +16,9 @@ import (
"github.com/gogo/protobuf/proto"
_ "github.com/mattn/go-sqlite3"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/pb"
)
@ -136,11 +139,11 @@ func TestHappyPath(t *testing.T) {
}
})
bandwidthAllocation := func(satelliteID string, total int64) []byte {
bandwidthAllocation := func(satelliteID storj.NodeID, total int64) []byte {
return serialize(t, &pb.RenterBandwidthAllocation_Data{
PayerAllocation: &pb.PayerBandwidthAllocation{
Data: serialize(t, &pb.PayerBandwidthAllocation_Data{
SatelliteId: []byte(satelliteID),
SatelliteId: satelliteID,
}),
},
Total: total,
@ -148,22 +151,23 @@ func TestHappyPath(t *testing.T) {
}
//TODO: use better data
nodeIDAB := teststorj.NodeIDFromString("AB")
allocationTests := []*pb.RenterBandwidthAllocation{
{
Signature: []byte("signed by test"),
Data: bandwidthAllocation("AB", 0),
Data: bandwidthAllocation(nodeIDAB, 0),
},
{
Signature: []byte("signed by sigma"),
Data: bandwidthAllocation("AB", 10),
Data: bandwidthAllocation(nodeIDAB, 10),
},
{
Signature: []byte("signed by sigma"),
Data: bandwidthAllocation("AB", 98),
Data: bandwidthAllocation(nodeIDAB, 98),
},
{
Signature: []byte("signed by test"),
Data: bandwidthAllocation("AB", 3),
Data: bandwidthAllocation(nodeIDAB, 3),
},
}

View File

@ -26,10 +26,11 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"storj.io/storj/internal/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/piecestore/psserver/psdb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storj"
)
var ctx = context.Background()
@ -535,18 +536,18 @@ func NewTestServer(t *testing.T) *TestServer {
}
}
caS, err := provider.NewTestCA(context.Background())
caS, err := testidentity.NewTestCA(context.Background())
check(err)
fiS, err := caS.NewIdentity()
check(err)
so, err := fiS.ServerOption()
check(err)
caC, err := provider.NewTestCA(context.Background())
caC, err := testidentity.NewTestCA(context.Background())
check(err)
fiC, err := caC.NewIdentity()
check(err)
co, err := fiC.DialOption("")
co, err := fiC.DialOption(storj.NodeID{})
check(err)
s, cleanup := newTestServerStruct(t)

View File

@ -16,6 +16,8 @@ import (
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/teststorj"
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storage/meta"
@ -51,7 +53,7 @@ func makePointer(path storj.Path) pb.PutRequest {
var rps []*pb.RemotePiece
rps = append(rps, &pb.RemotePiece{
PieceNum: 1,
NodeId: "testId",
NodeId: teststorj.NodeIDFromString("testId"),
})
pr := pb.PutRequest{
Path: path,

View File

@ -6,8 +6,10 @@ package mock_pointerdb
import (
context "context"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
pb "storj.io/storj/pkg/pb"
pdbclient "storj.io/storj/pkg/pointerdb/pdbclient"
)

View File

@ -296,7 +296,7 @@ func (s *Server) Iterate(ctx context.Context, req *pb.IterateRequest, f func(it
// PayerBandwidthAllocation returns PayerBandwidthAllocation struct, signed and with given action type
func (s *Server) PayerBandwidthAllocation(ctx context.Context, req *pb.PayerBandwidthAllocationRequest) (*pb.PayerBandwidthAllocationResponse, error) {
payer := s.identity.ID.Bytes()
payer := s.identity.ID
// TODO(michal) should be replaced with renter id when available
peerIdentity, err := provider.PeerIdentityFromContext(ctx)
@ -305,7 +305,7 @@ func (s *Server) PayerBandwidthAllocation(ctx context.Context, req *pb.PayerBand
}
pbad := &pb.PayerBandwidthAllocation_Data{
SatelliteId: payer,
UplinkId: peerIdentity.ID.Bytes(),
UplinkId: peerIdentity.ID,
CreatedUnixSec: time.Now().Unix(),
Action: req.GetAction(),
}

View File

@ -21,9 +21,9 @@ import (
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"storj.io/storj/internal/identity"
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/storage/meta"
"storj.io/storj/storage"
"storj.io/storj/storage/teststore"
@ -67,7 +67,7 @@ func TestServicePut(t *testing.T) {
func TestServiceGet(t *testing.T) {
ctx := context.Background()
ca, err := provider.NewTestCA(ctx)
ca, err := testidentity.NewTestCA(ctx)
assert.NoError(t, err)
identity, err := ca.NewIdentity()
assert.NoError(t, err)

View File

@ -10,6 +10,14 @@ import (
"github.com/stretchr/testify/assert"
)
// newTestCA returns a ca with a default difficulty and concurrency for use in tests
func newTestCA(ctx context.Context) (*FullCertificateAuthority, error) {
return NewCA(ctx, NewCAOptions{
Difficulty: 12,
Concurrency: 4,
})
}
func TestNewCA(t *testing.T) {
expectedDifficulty := uint16(4)
@ -20,7 +28,8 @@ func TestNewCA(t *testing.T) {
assert.NoError(t, err)
assert.NotEmpty(t, ca)
actualDifficulty := ca.ID.Difficulty()
actualDifficulty, err := ca.ID.Difficulty()
assert.NoError(t, err)
assert.True(t, actualDifficulty >= expectedDifficulty)
}
@ -31,7 +40,7 @@ func TestFullCertificateAuthority_NewIdentity(t *testing.T) {
}
}
ca, err := NewTestCA(context.Background())
ca, err := newTestCA(context.Background())
check(err, ca)
fi, err := ca.NewIdentity()
check(err, fi)

View File

@ -15,6 +15,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/peertls"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/utils"
)
@ -24,7 +25,7 @@ type PeerCertificateAuthority struct {
// Cert is the x509 certificate of the CA
Cert *x509.Certificate
// The ID is calculated from the CA public key.
ID nodeID
ID storj.NodeID
}
// FullCertificateAuthority represents the CA which is used to author and validate full identities
@ -33,7 +34,7 @@ type FullCertificateAuthority struct {
// Cert is the x509 certificate of the CA
Cert *x509.Certificate
// The ID is calculated from the CA public key.
ID nodeID
ID storj.NodeID
// Key is the private key of the CA
Key crypto.PrivateKey
}
@ -168,7 +169,7 @@ func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) {
pc.CertPath, err)
}
i, err := idFromKey(c[0].PublicKey)
i, err := NodeIDFromKey(c[0].PublicKey)
if err != nil {
return nil, err
}
@ -257,11 +258,3 @@ func (ca FullCertificateAuthority) NewIdentity() (*FullIdentity, error) {
ID: ca.ID,
}, nil
}
// NewTestCA returns a ca with a default difficulty and concurrency for use in tests
func NewTestCA(ctx context.Context) (*FullCertificateAuthority, error) {
return NewCA(ctx, NewCAOptions{
Difficulty: 12,
Concurrency: 4,
})
}

View File

@ -8,19 +8,19 @@ import (
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"io/ioutil"
"math/bits"
"net"
"os"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/peertls"
"storj.io/storj/pkg/utils"
)
@ -39,7 +39,7 @@ type PeerIdentity struct {
// signed by the CA. The leaf is what is used for communication.
Leaf *x509.Certificate
// The ID taken from the CA public key
ID nodeID
ID storj.NodeID
}
// FullIdentity represents you on the network. In addition to a PeerIdentity,
@ -52,7 +52,7 @@ type FullIdentity struct {
// signed by the CA. The leaf is what is used for communication.
Leaf *x509.Certificate
// The ID taken from the CA public key
ID nodeID
ID storj.NodeID
// Key is the key this identity uses with the leaf for communication.
Key crypto.PrivateKey
// PeerCAWhitelist is a whitelist of CA certs which, if present, restricts which peers this identity will verify as valid;
@ -106,7 +106,7 @@ func FullIdentityFromPEM(chainPEM, keyPEM, CAWhitelistPEM []byte) (*FullIdentity
if err != nil {
return nil, errs.Wrap(err)
}
i, err := idFromKey(ch[1].PublicKey)
i, err := NodeIDFromKey(ch[1].PublicKey)
if err != nil {
return nil, err
}
@ -151,7 +151,7 @@ func ParseCertChain(chain [][]byte) ([]*x509.Certificate, error) {
// PeerIdentityFromCerts loads a PeerIdentity from a pair of leaf and ca x509 certificates
func PeerIdentityFromCerts(leaf, ca *x509.Certificate, rest []*x509.Certificate) (*PeerIdentity, error) {
i, err := idFromKey(ca.PublicKey.(crypto.PublicKey))
i, err := NodeIDFromKey(ca.PublicKey.(crypto.PublicKey))
if err != nil {
return nil, err
}
@ -318,7 +318,7 @@ func (fi *FullIdentity) ServerOption(pcvFuncs ...peertls.PeerCertVerificationFun
// DialOption returns a grpc `DialOption` for making outgoing connections
// to the node with this peer identity
// id is an optional id of the node we are dialing
func (fi *FullIdentity) DialOption(id string) (grpc.DialOption, error) {
func (fi *FullIdentity) DialOption(id storj.NodeID) (grpc.DialOption, error) {
// TODO(coyle): add ID
ch := [][]byte{fi.Leaf.Raw, fi.CA.Raw}
ch = append(ch, fi.RestChainRaw()...)
@ -333,7 +333,7 @@ func (fi *FullIdentity) DialOption(id string) (grpc.DialOption, error) {
VerifyPeerCertificate: peertls.VerifyPeerFunc(
peertls.VerifyPeerCertChains,
func(_ [][]byte, parsedChains [][]*x509.Certificate) error {
if id == "" {
if id == (storj.NodeID{}) {
return nil
}
@ -342,7 +342,7 @@ func (fi *FullIdentity) DialOption(id string) (grpc.DialOption, error) {
return err
}
if peer.ID.String() != id {
if peer.ID.String() != id.String() {
return Error.New("peer ID did not match requested ID")
}
@ -354,31 +354,29 @@ func (fi *FullIdentity) DialOption(id string) (grpc.DialOption, error) {
return grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), nil
}
// nodeID is the legacy string-backed node identifier (base64-url of the
// key hash); superseded by storj.NodeID elsewhere in this change.
type nodeID string

// String returns the ID as its underlying string.
func (n nodeID) String() string { return string(n) }

// Bytes returns the ID's raw bytes (the encoded form, not the decoded hash).
func (n nodeID) Bytes() []byte { return []byte(n) }
func (n nodeID) Difficulty() uint16 {
hash, err := base64.URLEncoding.DecodeString(n.String())
// NodeIDFromKey hashes a public key and creates a node ID from it
func NodeIDFromKey(k crypto.PublicKey) (storj.NodeID, error) {
kb, err := x509.MarshalPKIXPublicKey(k)
if err != nil {
zap.S().Error(errs.Wrap(err))
return storj.NodeID{}, storj.ErrNodeID.Wrap(err)
}
// NOTE(review): the lines below through panic(reason) appear to be
// residue of the removed nodeID.Difficulty implementation interleaved
// by the diff rendering — they reference `hash` before its declaration
// and are not part of NodeIDFromKey; verify against the actual file.
for i := 1; i < len(hash); i++ {
b := hash[len(hash)-i]
if b != 0 {
zeroBits := bits.TrailingZeros16(uint16(b))
if zeroBits == 16 {
zeroBits = 0
}
return uint16((i-1)*8 + zeroBits)
}
}
// NB: this should never happen
reason := fmt.Sprintf("difficulty matches hash length! hash: %s", hash)
zap.S().Error(reason)
panic(reason)
// Hash the DER-encoded key with SHAKE-256 into a NodeID-sized buffer.
hash := make([]byte, len(storj.NodeID{}))
sha3.ShakeSum256(hash, kb)
return storj.NodeIDFromBytes(hash)
}
// NewFullIdentity creates a new ID for nodes with difficulty and concurrency params.
//
// It generates a certificate authority whose ID satisfies the given
// proof-of-work difficulty, using `concurrency` workers, and then derives
// a leaf identity from that CA. Returns a non-nil error if either CA
// generation or identity derivation fails.
func NewFullIdentity(ctx context.Context, difficulty uint16, concurrency uint) (*FullIdentity, error) {
	ca, err := NewCA(ctx, NewCAOptions{
		Difficulty:  difficulty,
		Concurrency: concurrency,
	})
	if err != nil {
		return nil, err
	}
	identity, err := ca.NewIdentity()
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return an explicit nil so success is
	// unmistakable to readers (original returned the stale `err` variable).
	return identity, nil
}

View File

@ -235,7 +235,8 @@ func TestNodeID_Difficulty(t *testing.T) {
done, _, fi, knownDifficulty := tempIdentity(t)
defer done()
difficulty := fi.ID.Difficulty()
difficulty, err := fi.ID.Difficulty()
assert.NoError(t, err)
assert.True(t, difficulty >= knownDifficulty)
}
@ -246,7 +247,7 @@ func TestVerifyPeer(t *testing.T) {
}
}
ca, err := NewTestCA(context.Background())
ca, err := newTestCA(context.Background())
check(err)
fi, err := ca.NewIdentity()
check(err)

View File

@ -11,13 +11,13 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/base64"
"encoding/pem"
"os"
"path/filepath"
"github.com/zeebo/errs"
"golang.org/x/crypto/sha3"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/peertls"
)
@ -62,7 +62,7 @@ func decodePEM(PEMBytes []byte) ([][]byte, error) {
func newCAWorker(ctx context.Context, difficulty uint16, parentCert *x509.Certificate, parentKey crypto.PrivateKey, caC chan FullCertificateAuthority, eC chan error) {
var (
k crypto.PrivateKey
i nodeID
i storj.NodeID
err error
)
for {
@ -77,7 +77,7 @@ func newCAWorker(ctx context.Context, difficulty uint16, parentCert *x509.Certif
}
switch kE := k.(type) {
case *ecdsa.PrivateKey:
i, err = idFromKey(&kE.PublicKey)
i, err = NodeIDFromKey(&kE.PublicKey)
if err != nil {
eC <- err
return
@ -88,7 +88,12 @@ func newCAWorker(ctx context.Context, difficulty uint16, parentCert *x509.Certif
}
}
if i.Difficulty() >= difficulty {
d, err := i.Difficulty()
if err != nil {
eC <- err
continue
}
if d >= difficulty {
break
}
}
@ -163,16 +168,6 @@ func newCACert(key, parentKey crypto.PrivateKey, template, parentCert *x509.Cert
return cert, nil
}
// idFromKey derives a nodeID from a public key: the key is PKIX/DER
// marshaled, the DER bytes are hashed with SHAKE-256 into an
// IdentityLength-sized digest, and the digest is base64-url encoded.
func idFromKey(k crypto.PublicKey) (nodeID, error) {
	der, err := x509.MarshalPKIXPublicKey(k)
	if err != nil {
		return "", errs.Wrap(err)
	}
	digest := make([]byte, IdentityLength)
	sha3.ShakeSum256(digest, der)
	encoded := base64.URLEncoding.EncodeToString(digest)
	return nodeID(encoded), nil
}
func openCert(path string, flag int) (*os.File, error) {
if err := os.MkdirAll(filepath.Dir(path), 0744); err != nil {
return nil, errs.Wrap(err)

View File

@ -9,6 +9,7 @@ import (
"github.com/zeebo/errs"
"github.com/skyrings/skyring-common/tools/uuid"
"storj.io/storj/pkg/satellite"
"storj.io/storj/pkg/satellite/satellitedb/dbx"
)

View File

@ -7,6 +7,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/internal/testcontext"
"storj.io/storj/pkg/satellite"
"storj.io/storj/pkg/satellite/satellitedb/dbx"

Some files were not shown because too many files have changed in this diff Show More