merge conflicts

Dennis Coyle 2018-05-02 15:10:40 -04:00
commit a51087a343
10 changed files with 473 additions and 170 deletions

.gitignore vendored
View File

@@ -13,5 +13,3 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Generated protobuf files
*.pb.go

View File

@@ -8,19 +8,18 @@
----
Storj is a platform, cryptocurrency, and suite of decentralized applications that allows you to store data in a secure and decentralized manner. Your files are encrypted, shredded into little pieces called 'shards', and stored in a decentralized network of computers around the globe. No one but you has a complete copy of your file, not even in an ecrypted form.
Storj is a platform, token, and suite of decentralized applications that allows you to store data in a secure and decentralized manner. Your files are encrypted, shredded into little pieces called 'shards', and stored in a global decentralized network of computers. Only you can retrieve all the shards from the network, decrypt them, and finally re-combine the pieces into your original file.
----
## To start using Storj
See our documentation at [storj docs](https://docs.storj.io/docs).
See our documentation at [Storj docs](https://docs.storj.io/docs).
## To start developing storj
## To start developing Storj
The [community site](https://storj.io/community.html) hosts all information about
building storj from source, how to contribute code
The [community site](https://storj.io/community.html) hosts all information about building Storj from source, how to contribute code
and documentation, who to contact about what, etc.
### Install VGO
@@ -31,32 +30,35 @@ go get -u golang.org/x/vgo
### Install non-go development dependencies
In order to develop on storj, you will need to have the protobuf compiler installed on your system.
In order to develop on Storj, you will need to have the `protobuf` compiler installed on your system.
1. Grab the latest release for your system from [here](https://github.com/google/protobuf/releases)
1. Grab the latest release for your system from [here](https://github.com/google/protobuf/releases).
1. place the `protoc` binary in your path. i.e.
1. Place the `protoc` binary in your path, e.g.
```bash
mv $HOME/Downloads/protoc-<version>-<arch>/bin/protoc /usr/local/bin
```
### Install go dependencies
Use vgo to install both dev and non-dev dependencies
Use vgo to install both dev and non-dev dependencies.
1. Install development dependencies
```
make build-dev-deps
```
1. Install project dependencies
```bash
# in project root
vgo install
```
If you want to build storj right away there are two options:
If you want to build Storj right away, there are two options:
##### You have a working [Go environment](https://golang.org/doc/install).
@@ -78,12 +80,7 @@ For the full story, head over to the [developer's documentation].
## Support
If you need support, start with the [troubleshooting guide],
and work your way through the process that we've outlined.
That said, if you have questions, reach out to us
[twitter](https://twitter.com/storjproject).
If you need support, start with the [troubleshooting guide], and work your way through the process that we've outlined.
That said, if you have any questions or suggestions, please reach out to us on [Rocket.Chat](https://storj.io/community.html) or [Twitter](https://twitter.com/storjproject).

View File

@@ -47,13 +47,29 @@ func Main() error {
if err != nil {
return err
}
// initialize http rangers in parallel to save from network latency
rrs := map[int]ranger.Ranger{}
for i := 0; i < 40; i++ {
url := fmt.Sprintf("http://localhost:%d", 10000+i)
rrs[i], err = ranger.HTTPRanger(url)
if err != nil {
type indexRangerError struct {
i int
rr ranger.Ranger
err error
}
result := make(chan indexRangerError, *rsn)
for i := 0; i < *rsn; i++ {
go func(i int) {
url := fmt.Sprintf("http://18.184.133.99:%d", 10000+i)
rr, err := ranger.HTTPRanger(url)
result <- indexRangerError{i, rr, err}
}(i)
}
// wait for all goroutines to finish and save result in rrs map
for i := 0; i < *rsn; i++ {
res := <-result
if res.err != nil {
// return on the first failure
return res.err
}
rrs[res.i] = res.rr
}
rr, err := eestream.Decode(rrs, es)
if err != nil {
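The new code above fans the `ranger.HTTPRanger` setup out to one goroutine per piece and collects the indexed results over a buffered channel. A minimal, self-contained sketch of that fan-out/fan-in shape is below; `makeRanger` and the `localhost` URLs are hypothetical stand-ins for `ranger.HTTPRanger` and the real piece addresses, not part of this commit.

```go
package main

import "fmt"

// indexedResult pairs a worker's index with its value and error so results
// that arrive in any order can be placed back into the right slot.
type indexedResult struct {
	i   int
	val string
	err error
}

// makeRanger is a hypothetical stand-in for ranger.HTTPRanger(url).
func makeRanger(url string) (string, error) {
	return "ranger for " + url, nil
}

func main() {
	const n = 4
	results := make(chan indexedResult, n) // buffered: senders never block

	// Fan out: one goroutine per piece.
	for i := 0; i < n; i++ {
		go func(i int) {
			url := fmt.Sprintf("http://localhost:%d", 10000+i)
			val, err := makeRanger(url)
			results <- indexedResult{i, val, err}
		}(i)
	}

	// Fan in: collect exactly n results, keyed by index.
	rrs := map[int]string{}
	for i := 0; i < n; i++ {
		res := <-results
		if res.err != nil {
			fmt.Println("failed:", res.err) // bail on the first failure
			return
		}
		rrs[res.i] = res.val
	}
	fmt.Println(rrs)
}
```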

View File

@@ -151,11 +151,25 @@ func (dr *decodedRanger) Range(offset, length int64) io.ReadCloser {
offset, length, dr.es.DecodedBlockSize())
// go ask for ranges for all those block boundaries
// do it parallel to save from network latency
readers := make(map[int]io.ReadCloser, len(dr.rrs))
type indexReadCloser struct {
i int
r io.ReadCloser
}
result := make(chan indexReadCloser, len(dr.rrs))
for i, rr := range dr.rrs {
readers[i] = rr.Range(
firstBlock*int64(dr.es.EncodedBlockSize()),
blockCount*int64(dr.es.EncodedBlockSize()))
go func(i int, rr ranger.Ranger) {
r := rr.Range(
firstBlock*int64(dr.es.EncodedBlockSize()),
blockCount*int64(dr.es.EncodedBlockSize()))
result <- indexReadCloser{i, r}
}(i, rr)
}
// wait for all goroutines to finish and save result in readers map
for range dr.rrs {
res := <-result
readers[res.i] = res.r
}
// decode from all those ranges
r := DecodeReaders(readers, dr.es)
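Worth noting about both of these call sites: the result channel is buffered to the number of goroutines launched (`*rsn` in the command, `len(dr.rrs)` here), so every worker can send its result and exit even if the collecting loop falls behind, and the index carried in each message lets the out-of-order channel receives rebuild the ordered `rrs` and `readers` maps.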

View File

@@ -12,7 +12,7 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
proto "github.com/coyle/storj/protos/overlay" // naming proto to avoid confusion with this package
proto "storj.io/storj/protos/overlay" // naming proto to avoid confusion with this package
)
func TestNewServer(t *testing.T) {

View File

@@ -34,30 +34,6 @@ func ServeContent(w http.ResponseWriter, r *http.Request, name string,
code := http.StatusOK
// If Content-Type isn't set, use the file's extension to find it, but
// if the Content-Type is unset explicitly, do not sniff the type.
ctypes, haveType := w.Header()["Content-Type"]
var ctype string
if !haveType {
ctype = mime.TypeByExtension(filepath.Ext(name))
if ctype == "" {
// read a chunk to decide between utf-8 text and binary
var buf [sniffLen]byte
amount := content.Size()
if amount > sniffLen {
amount = sniffLen
}
// TODO: cache this somewhere so we don't have to pull it out again
r := content.Range(0, amount)
defer r.Close()
n, _ := io.ReadFull(r, buf[:])
ctype = http.DetectContentType(buf[:n])
}
w.Header().Set("Content-Type", ctype)
} else if len(ctypes) > 0 {
ctype = ctypes[0]
}
size := content.Size()
if size <= 0 {
@@ -105,6 +81,30 @@ func ServeContent(w http.ResponseWriter, r *http.Request, name string,
code = http.StatusPartialContent
w.Header().Set("Content-Range", ra.contentRange(size))
case len(ranges) > 1:
// If Content-Type isn't set, use the file's extension to find it, but
// if the Content-Type is unset explicitly, do not sniff the type.
ctypes, haveType := w.Header()["Content-Type"]
var ctype string
if !haveType {
ctype = mime.TypeByExtension(filepath.Ext(name))
if ctype == "" {
// read a chunk to decide between utf-8 text and binary
var buf [sniffLen]byte
amount := content.Size()
if amount > sniffLen {
amount = sniffLen
}
// TODO: cache this somewhere so we don't have to pull it out again
r := content.Range(0, amount)
defer r.Close()
n, _ := io.ReadFull(r, buf[:])
ctype = http.DetectContentType(buf[:n])
}
w.Header().Set("Content-Type", ctype)
} else if len(ctypes) > 0 {
ctype = ctypes[0]
}
sendSize = rangesMIMESize(ranges, ctype, size)
code = http.StatusPartialContent
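The relocated block decides the Content-Type in two steps: look the type up from the file extension, and only if that fails, sniff the first `sniffLen` bytes of the content. Below is a stand-alone sketch of that fallback using the same standard-library calls (`mime.TypeByExtension`, `http.DetectContentType`); the 512-byte sniff window is an assumption standing in for the package's `sniffLen` constant, whose value is not shown in this diff.

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
	"path/filepath"
)

// detectContentType mirrors the two-step logic above: try the file
// extension first, then fall back to sniffing an initial chunk of data.
func detectContentType(name string, data []byte) string {
	ctype := mime.TypeByExtension(filepath.Ext(name))
	if ctype == "" {
		const sniffLen = 512 // assumption: net/http sniffs at most 512 bytes
		if len(data) > sniffLen {
			data = data[:sniffLen]
		}
		ctype = http.DetectContentType(data)
	}
	return ctype
}

func main() {
	fmt.Println(detectContentType("index.html", nil))                 // resolved from the extension
	fmt.Println(detectContentType("noext", []byte("just some text"))) // resolved by sniffing
}
```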

View File

@@ -1,117 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (durations.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}

protos/overlay/gen.go Normal file
View File

@@ -0,0 +1,3 @@
package overlay
//go:generate protoc --go_out=plugins=grpc:. overlay.proto
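For context: with `protoc` and the `protoc-gen-go` plugin on the PATH, running `go generate ./protos/overlay` executes this directive and regenerates `overlay.pb.go` (shown below) from `overlay.proto`.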

View File

@@ -0,0 +1,392 @@
// Code generated by protoc-gen-go.
// source: overlay.proto
// DO NOT EDIT!
/*
Package overlay is a generated protocol buffer package.
It is generated from these files:
overlay.proto
It has these top-level messages:
LookupRequest
LookupResponse
FindStorageNodesResponse
FindStorageNodesRequest
NodeAddress
OverlayOptions
NodeRep
Node
*/
package overlay
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/duration"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// NodeTransport is an enum of possible transports for the overlay network
type NodeTransport int32
const (
NodeTransport_TCP NodeTransport = 0
)
var NodeTransport_name = map[int32]string{
0: "TCP",
}
var NodeTransport_value = map[string]int32{
"TCP": 0,
}
func (x NodeTransport) String() string {
return proto.EnumName(NodeTransport_name, int32(x))
}
func (NodeTransport) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
// LookupRequest is the request message for the lookup rpc call
type LookupRequest struct {
NodeID string `protobuf:"bytes,1,opt,name=nodeID" json:"nodeID,omitempty"`
}
func (m *LookupRequest) Reset() { *m = LookupRequest{} }
func (m *LookupRequest) String() string { return proto.CompactTextString(m) }
func (*LookupRequest) ProtoMessage() {}
func (*LookupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *LookupRequest) GetNodeID() string {
if m != nil {
return m.NodeID
}
return ""
}
// LookupResponse is the response message for the lookup rpc call
type LookupResponse struct {
NodeAddress *NodeAddress `protobuf:"bytes,1,opt,name=nodeAddress" json:"nodeAddress,omitempty"`
}
func (m *LookupResponse) Reset() { *m = LookupResponse{} }
func (m *LookupResponse) String() string { return proto.CompactTextString(m) }
func (*LookupResponse) ProtoMessage() {}
func (*LookupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *LookupResponse) GetNodeAddress() *NodeAddress {
if m != nil {
return m.NodeAddress
}
return nil
}
// FindStorageNodesResponse is the response message for the FindStorageNodes rpc call
type FindStorageNodesResponse struct {
Node []*Node `protobuf:"bytes,1,rep,name=node" json:"node,omitempty"`
}
func (m *FindStorageNodesResponse) Reset() { *m = FindStorageNodesResponse{} }
func (m *FindStorageNodesResponse) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesResponse) ProtoMessage() {}
func (*FindStorageNodesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *FindStorageNodesResponse) GetNode() []*Node {
if m != nil {
return m.Node
}
return nil
}
// FindStorageNodesRequest is the request message for the FindStorageNodes rpc call
type FindStorageNodesRequest struct {
ObjectSize int64 `protobuf:"varint,1,opt,name=objectSize" json:"objectSize,omitempty"`
ContractLength *google_protobuf.Duration `protobuf:"bytes,2,opt,name=contractLength" json:"contractLength,omitempty"`
Opts *OverlayOptions `protobuf:"bytes,3,opt,name=opts" json:"opts,omitempty"`
}
func (m *FindStorageNodesRequest) Reset() { *m = FindStorageNodesRequest{} }
func (m *FindStorageNodesRequest) String() string { return proto.CompactTextString(m) }
func (*FindStorageNodesRequest) ProtoMessage() {}
func (*FindStorageNodesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *FindStorageNodesRequest) GetObjectSize() int64 {
if m != nil {
return m.ObjectSize
}
return 0
}
func (m *FindStorageNodesRequest) GetContractLength() *google_protobuf.Duration {
if m != nil {
return m.ContractLength
}
return nil
}
func (m *FindStorageNodesRequest) GetOpts() *OverlayOptions {
if m != nil {
return m.Opts
}
return nil
}
// NodeAddress contains the information needed to communicate with a node on the network
type NodeAddress struct {
Transport NodeTransport `protobuf:"varint,1,opt,name=transport,enum=NodeTransport" json:"transport,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"`
}
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (m *NodeAddress) String() string { return proto.CompactTextString(m) }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *NodeAddress) GetTransport() NodeTransport {
if m != nil {
return m.Transport
}
return NodeTransport_TCP
}
func (m *NodeAddress) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
// OverlayOptions is a set of criteria that a node must meet to be considered for a storage opportunity
type OverlayOptions struct {
MaxLatency *google_protobuf.Duration `protobuf:"bytes,1,opt,name=maxLatency" json:"maxLatency,omitempty"`
MinReputation *NodeRep `protobuf:"bytes,2,opt,name=minReputation" json:"minReputation,omitempty"`
MinSpeedKbps int64 `protobuf:"varint,3,opt,name=minSpeedKbps" json:"minSpeedKbps,omitempty"`
}
func (m *OverlayOptions) Reset() { *m = OverlayOptions{} }
func (m *OverlayOptions) String() string { return proto.CompactTextString(m) }
func (*OverlayOptions) ProtoMessage() {}
func (*OverlayOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *OverlayOptions) GetMaxLatency() *google_protobuf.Duration {
if m != nil {
return m.MaxLatency
}
return nil
}
func (m *OverlayOptions) GetMinReputation() *NodeRep {
if m != nil {
return m.MinReputation
}
return nil
}
func (m *OverlayOptions) GetMinSpeedKbps() int64 {
if m != nil {
return m.MinSpeedKbps
}
return 0
}
// NodeRep is the reputation characteristics of a node
type NodeRep struct {
}
func (m *NodeRep) Reset() { *m = NodeRep{} }
func (m *NodeRep) String() string { return proto.CompactTextString(m) }
func (*NodeRep) ProtoMessage() {}
func (*NodeRep) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
// Node represents a node in the overlay network
type Node struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Address *NodeAddress `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"`
}
func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Node) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Node) GetAddress() *NodeAddress {
if m != nil {
return m.Address
}
return nil
}
func init() {
proto.RegisterType((*LookupRequest)(nil), "LookupRequest")
proto.RegisterType((*LookupResponse)(nil), "LookupResponse")
proto.RegisterType((*FindStorageNodesResponse)(nil), "FindStorageNodesResponse")
proto.RegisterType((*FindStorageNodesRequest)(nil), "FindStorageNodesRequest")
proto.RegisterType((*NodeAddress)(nil), "NodeAddress")
proto.RegisterType((*OverlayOptions)(nil), "OverlayOptions")
proto.RegisterType((*NodeRep)(nil), "NodeRep")
proto.RegisterType((*Node)(nil), "Node")
proto.RegisterEnum("NodeTransport", NodeTransport_name, NodeTransport_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Overlay service
type OverlayClient interface {
// Lookup finds a node's address from the network
Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error)
// FindStorageNodes finds a list of nodes in the network that meet the specified request parameters
FindStorageNodes(ctx context.Context, in *FindStorageNodesRequest, opts ...grpc.CallOption) (*FindStorageNodesResponse, error)
}
type overlayClient struct {
cc *grpc.ClientConn
}
func NewOverlayClient(cc *grpc.ClientConn) OverlayClient {
return &overlayClient{cc}
}
func (c *overlayClient) Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) {
out := new(LookupResponse)
err := grpc.Invoke(ctx, "/Overlay/Lookup", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *overlayClient) FindStorageNodes(ctx context.Context, in *FindStorageNodesRequest, opts ...grpc.CallOption) (*FindStorageNodesResponse, error) {
out := new(FindStorageNodesResponse)
err := grpc.Invoke(ctx, "/Overlay/FindStorageNodes", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Overlay service
type OverlayServer interface {
// Lookup finds a node's address from the network
Lookup(context.Context, *LookupRequest) (*LookupResponse, error)
// FindStorageNodes finds a list of nodes in the network that meet the specified request parameters
FindStorageNodes(context.Context, *FindStorageNodesRequest) (*FindStorageNodesResponse, error)
}
func RegisterOverlayServer(s *grpc.Server, srv OverlayServer) {
s.RegisterService(&_Overlay_serviceDesc, srv)
}
func _Overlay_Lookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LookupRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(OverlayServer).Lookup(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/Overlay/Lookup",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(OverlayServer).Lookup(ctx, req.(*LookupRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Overlay_FindStorageNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FindStorageNodesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(OverlayServer).FindStorageNodes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/Overlay/FindStorageNodes",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(OverlayServer).FindStorageNodes(ctx, req.(*FindStorageNodesRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Overlay_serviceDesc = grpc.ServiceDesc{
ServiceName: "Overlay",
HandlerType: (*OverlayServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Lookup",
Handler: _Overlay_Lookup_Handler,
},
{
MethodName: "FindStorageNodes",
Handler: _Overlay_FindStorageNodes_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "overlay.proto",
}
func init() { proto.RegisterFile("overlay.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 444 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0x51, 0x6f, 0xd3, 0x30,
0x10, 0xc7, 0x49, 0x53, 0x5a, 0x7a, 0x5d, 0xb3, 0xc9, 0x0f, 0x90, 0xf6, 0x61, 0x9a, 0x8c, 0x04,
0x03, 0x21, 0x4f, 0x2a, 0xe2, 0x81, 0x17, 0xc4, 0xc4, 0x04, 0x42, 0x54, 0x0c, 0xb9, 0xe3, 0x03,
0xb8, 0xf5, 0x51, 0x02, 0xab, 0x6d, 0x6c, 0x07, 0x31, 0x24, 0x3e, 0x0b, 0xe2, 0x9b, 0xa2, 0x38,
0x0e, 0x6b, 0x8a, 0xba, 0xb7, 0xdc, 0xfd, 0xff, 0xe7, 0xdc, 0xfd, 0xf4, 0x87, 0x91, 0xfe, 0x8e,
0xf6, 0x52, 0x5c, 0x31, 0x63, 0xb5, 0xd7, 0x93, 0xc3, 0x95, 0xd6, 0xab, 0x4b, 0x3c, 0x09, 0xd5,
0xa2, 0xfc, 0x74, 0x22, 0x4b, 0x2b, 0x7c, 0xa1, 0x55, 0xad, 0xd3, 0x87, 0x30, 0x9a, 0x69, 0xfd,
0xb5, 0x34, 0x1c, 0xbf, 0x95, 0xe8, 0x3c, 0xb9, 0x0b, 0x3d, 0xa5, 0x25, 0xbe, 0x3d, 0xcb, 0x93,
0xa3, 0xe4, 0x78, 0xc0, 0x63, 0x45, 0x5f, 0x42, 0xd6, 0x18, 0x9d, 0xd1, 0xca, 0x21, 0x61, 0x30,
0xac, 0xb4, 0x53, 0x29, 0x2d, 0x3a, 0x17, 0xec, 0xc3, 0xe9, 0x1e, 0x7b, 0x7f, 0xdd, 0xe3, 0x9b,
0x06, 0xfa, 0x0c, 0xf2, 0xd7, 0x85, 0x92, 0x73, 0xaf, 0xad, 0x58, 0x61, 0x65, 0x73, 0xff, 0xde,
0x1a, 0x43, 0xb7, 0xb2, 0xe6, 0xc9, 0x51, 0x7a, 0x3c, 0x9c, 0xde, 0x0e, 0x8f, 0xf0, 0xd0, 0xa2,
0x7f, 0x12, 0xb8, 0xf7, 0xff, 0x5c, 0xbd, 0xec, 0x21, 0x80, 0x5e, 0x7c, 0xc1, 0xa5, 0x9f, 0x17,
0x3f, 0x31, 0x6c, 0x90, 0xf2, 0x8d, 0x0e, 0x39, 0x85, 0x6c, 0xa9, 0x95, 0xb7, 0x62, 0xe9, 0x67,
0xa8, 0x56, 0xfe, 0x73, 0xde, 0x09, 0x5b, 0x8e, 0x59, 0x8d, 0x85, 0x35, 0x58, 0xd8, 0x59, 0xc4,
0xc2, 0xb7, 0x06, 0xc8, 0x7d, 0xe8, 0x6a, 0xe3, 0x5d, 0x9e, 0x86, 0xc1, 0x7d, 0x76, 0x5e, 0xe3,
0x3d, 0x37, 0x95, 0xdb, 0xf1, 0x20, 0xd2, 0x8f, 0x30, 0xdc, 0x38, 0x9b, 0x3c, 0x81, 0x81, 0xb7,
0x42, 0x39, 0xa3, 0xad, 0x0f, 0x5b, 0x65, 0xd3, 0x2c, 0x9c, 0x74, 0xd1, 0x74, 0xf9, 0xb5, 0x81,
0xe4, 0xd0, 0x17, 0x91, 0x61, 0x27, 0x20, 0x6f, 0x4a, 0xfa, 0x3b, 0x81, 0xac, 0xfd, 0x3f, 0xf2,
0x1c, 0x60, 0x2d, 0x7e, 0xcc, 0x84, 0x47, 0xb5, 0xbc, 0x8a, 0xcc, 0x6f, 0xb8, 0x66, 0xc3, 0x4c,
0x18, 0x8c, 0xd6, 0x85, 0xe2, 0x68, 0x4a, 0x1f, 0xc4, 0xc8, 0xe2, 0x4e, 0x0d, 0x1b, 0x0d, 0x6f,
0xcb, 0x84, 0xc2, 0xde, 0xba, 0x50, 0x73, 0x83, 0x28, 0xdf, 0x2d, 0x4c, 0x4d, 0x20, 0xe5, 0xad,
0x1e, 0x1d, 0x40, 0x3f, 0x4e, 0xd3, 0x17, 0xd0, 0xad, 0x3e, 0x49, 0x06, 0x9d, 0x42, 0xc6, 0xf0,
0x74, 0x0a, 0x49, 0x1e, 0xb4, 0xcf, 0xdb, 0x8e, 0x48, 0x23, 0x3e, 0xce, 0x61, 0xd4, 0x42, 0x44,
0xfa, 0x90, 0x5e, 0xbc, 0xfa, 0x70, 0x70, 0x6b, 0xfa, 0x0b, 0xfa, 0x91, 0x02, 0x79, 0x04, 0xbd,
0x3a, 0x85, 0x24, 0x63, 0xad, 0xdc, 0x4e, 0xf6, 0xd9, 0x56, 0x3c, 0xdf, 0xc0, 0xc1, 0x76, 0x6c,
0x48, 0xce, 0x76, 0x24, 0x69, 0x32, 0x66, 0xbb, 0xb2, 0xb9, 0xe8, 0x05, 0xac, 0x4f, 0xff, 0x06,
0x00, 0x00, 0xff, 0xff, 0xe2, 0x47, 0x66, 0x29, 0x5a, 0x03, 0x00, 0x00,
}
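For orientation, the generated `OverlayClient` is consumed like any gRPC client from this generation of `grpc-go`: dial a connection, wrap it with `NewOverlayClient`, and call the interface methods. A rough sketch follows; the `localhost:8080` address and the node ID are illustrative assumptions, not values from this commit.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	proto "storj.io/storj/protos/overlay"
)

func main() {
	// Dial the overlay service; the address is a placeholder for illustration.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := proto.NewOverlayClient(conn)

	// Lookup resolves a node ID to the address stored in the overlay network.
	resp, err := client.Lookup(context.Background(), &proto.LookupRequest{NodeID: "example-node-id"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.GetNodeAddress())
}
```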

View File

@@ -3,7 +3,7 @@
syntax = "proto3";
import "duration.proto";
import "google/protobuf/duration.proto";
// NodeTransport is an enum of possible transports for the overlay network
enum NodeTransport {