Storage node and upload/download protocol refactor (#1422)
refactor storage node server; refactor upload and download protocol
parent c7ffbe1c28
commit 05d148aeb5
@@ -1,6 +1,6 @@
language: go
go: 1.11.x
go_import_path: "storj.io/storj"
git:
@@ -462,7 +462,7 @@ func getSegments(cmd *cobra.Command, args []string) error {
	// query DB and paginate results
	for length >= irreparableLimit {
-		res, err := i.irrdbclient.ListSegments(context.Background(), &pb.ListSegmentsRequest{Limit: irreparableLimit, Offset: offset})
+		res, err := i.irrdbclient.ListIrreparableSegments(context.Background(), &pb.ListIrreparableSegmentsRequest{Limit: irreparableLimit, Offset: offset})
		if err != nil {
			return ErrRequest.Wrap(err)
		}
@@ -43,12 +43,12 @@ func main() {
	filesizes := &memory.Sizes{
		Default: []memory.Size{
-			1 * memory.KB,
-			256 * memory.KB,
-			1 * memory.MB,
-			32 * memory.MB,
-			64 * memory.MB,
-			256 * memory.MB,
+			1 * memory.KiB,
+			256 * memory.KiB,
+			1 * memory.MiB,
+			32 * memory.MiB,
+			64 * memory.MiB,
+			256 * memory.MiB,
		},
	}
	flag.Var(filesizes, "filesize", "filesizes to test with")
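The sizes above switch from decimal SI units (memory.KB, memory.MB) to binary IEC units (memory.KiB, memory.MiB), a change applied throughout this commit. A minimal sketch of the difference, assuming the internal/memory package's constants:

package main

import (
	"fmt"

	"storj.io/storj/internal/memory"
)

func main() {
	fmt.Println(memory.KB.Int64())  // 1000 (decimal, SI)
	fmt.Println(memory.KiB.Int64()) // 1024 (binary, IEC)
	// A "32 K" test buffer differs by 768 bytes between the two readings:
	fmt.Println(32*memory.KiB.Int64() - 32*memory.KB.Int64()) // 768
}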
@@ -88,7 +88,7 @@ func printDashboard(data *pb.DashboardResponse, online bool) error {
	_, _ = heading.Printf("\n======================\n\n")

	w := tabwriter.NewWriter(color.Output, 0, 0, 1, ' ', 0)
-	fmt.Fprintf(w, "ID\t%s\n", color.YellowString(data.GetNodeId()))
+	fmt.Fprintf(w, "ID\t%s\n", color.YellowString(data.NodeId.String()))

	if online {
		fmt.Fprintf(w, "Status\t%s\n", color.GreenString("ONLINE"))
@@ -109,6 +109,7 @@ func databaseConfig(config storagenode.Config) storagenodedb.Config {
	return storagenodedb.Config{
		Storage:  config.Storage.Path,
		Info:     filepath.Join(config.Storage.Path, "piecestore.db"),
+		Info2:    filepath.Join(config.Storage.Path, "info.db"),
		Kademlia: config.Kademlia.DBPath,
	}
}
@@ -183,8 +183,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
	"--server.private-address", net.JoinHostPort(host, strconv.Itoa(satellitePrivatePort+i)),

	"--kademlia.bootstrap-addr", bootstrap.Address,
-	"--repairer.overlay-addr", process.Address,
-	"--repairer.pointer-db-addr", process.Address,

	"--server.extensions.revocation=false",
	"--server.use-peer-ca-whitelist=false",
@@ -22,12 +22,12 @@ func TestCopy(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

-	r := io.LimitReader(rand.Reader, 32*memory.KB.Int64())
+	r := io.LimitReader(rand.Reader, 32*memory.KiB.Int64())

	n, err := sync2.Copy(ctx, ioutil.Discard, r)

	assert.NoError(t, err)
-	assert.Equal(t, n, 32*memory.KB.Int64())
+	assert.Equal(t, n, 32*memory.KiB.Int64())
}

func TestCopy_Cancel(t *testing.T) {
@@ -36,7 +36,7 @@ func TestCopy_Cancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

-	r := io.LimitReader(rand.Reader, 32*memory.KB.Int64())
+	r := io.LimitReader(rand.Reader, 32*memory.KiB.Int64())

	n, err := sync2.Copy(ctx, ioutil.Discard, r)
@@ -186,7 +186,7 @@ func (ctx *Context) reportRunning() {
	ctx.test.Error(message.String())

-	stack := make([]byte, 1*memory.MB.Int())
+	stack := make([]byte, 1*memory.MiB.Int())
	n := runtime.Stack(stack, true)
	stack = stack[:n]
	ctx.test.Error("Full Stack Trace:\n", string(stack))
@@ -172,7 +172,12 @@ func NewCustom(log *zap.Logger, config Config) (*Planet, error) {
		return nil, errs.Combine(err, planet.Shutdown())
	}

-	planet.StorageNodes, err = planet.newStorageNodes(config.StorageNodeCount)
+	whitelistedSatellites := make([]string, len(planet.Satellites))
+	for _, satellite := range planet.Satellites {
+		whitelistedSatellites = append(whitelistedSatellites, satellite.ID().String())
+	}
+
+	planet.StorageNodes, err = planet.newStorageNodes(config.StorageNodeCount, whitelistedSatellites)
	if err != nil {
		return nil, errs.Combine(err, planet.Shutdown())
	}
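One caveat in the hunk above: make([]string, len(planet.Satellites)) allocates length, not capacity, so the appends leave len(planet.Satellites) empty strings at the front of the whitelist, and the later strings.Join produces leading commas. A sketch of the same construction without the leading empty entries (same names as the diff):

// Allocate capacity only, so append starts filling at index 0.
whitelistedSatellites := make([]string, 0, len(planet.Satellites))
for _, satellite := range planet.Satellites {
	whitelistedSatellites = append(whitelistedSatellites, satellite.ID().String())
}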
@@ -427,12 +432,9 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
		Interval: 30 * time.Second,
	},
	Repairer: repairer.Config{
-		MaxRepair:     10,
-		Interval:      time.Hour,
-		OverlayAddr:   "", // overridden in satellite.New
-		PointerDBAddr: "", // overridden in satellite.New
-		MaxBufferMem:  4 * memory.MB,
-		APIKey:        "",
+		MaxRepair:    10,
+		Interval:     time.Hour,
+		MaxBufferMem: 4 * memory.MiB,
	},
	Audit: audit.Config{
		MaxRetriesStatDB: 0,
@@ -481,7 +483,7 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
}

// newStorageNodes initializes storage nodes
-func (planet *Planet) newStorageNodes(count int) ([]*storagenode.Peer, error) {
+func (planet *Planet) newStorageNodes(count int, whitelistedSatelliteIDs []string) ([]*storagenode.Peer, error) {
	// TODO: move into separate file
	var xs []*storagenode.Peer
	defer func() {
@@ -552,6 +554,9 @@ func (planet *Planet) newStorageNodes(count int) ([]*storagenode.Peer, error) {
		AgreementSenderCheckInterval: time.Hour,
		CollectorInterval:            time.Hour,
+
+		SatelliteIDRestriction:  true,
+		WhitelistedSatelliteIDs: strings.Join(whitelistedSatelliteIDs, ","),
		},
	}
	if planet.config.Reconfigure.StorageNode != nil {
@@ -15,6 +15,7 @@ import (
	"go.uber.org/zap"
	"google.golang.org/grpc"

+	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/cfgstruct"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/overlay"
@@ -28,6 +29,8 @@ import (
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/console"
	"storj.io/storj/uplink"
+	"storj.io/storj/uplink/metainfo"
+	"storj.io/storj/uplink/piecestore"
)

// Uplink is a general purpose
@@ -130,6 +133,26 @@ func (uplink *Uplink) DialPointerDB(destination Peer, apikey string) (pdbclient.
	return pdbclient.NewClient(uplink.Transport, destination.Addr(), apikey)
}

+// DialMetainfo dials destination with apikey and returns metainfo Client
+func (uplink *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey string) (metainfo.Client, error) {
+	// TODO: handle disconnect
+	return metainfo.NewClient(ctx, uplink.Transport, destination.Addr(), apikey)
+}
+
+// DialPiecestore dials destination storagenode and returns a piecestore client.
+func (uplink *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*piecestore.Client, error) {
+	node := destination.Local()
+
+	conn, err := uplink.Transport.DialNode(ctx, &node)
+	if err != nil {
+		return nil, err
+	}
+
+	signer := signing.SignerFromFullIdentity(uplink.Transport.Identity())
+
+	return piecestore.NewClient(uplink.Log.Named("uplink>piecestore"), signer, conn, piecestore.DefaultConfig), nil
+}
+
// DialOverlay dials destination and returns an overlay.Client
func (uplink *Uplink) DialOverlay(destination Peer) (overlay.Client, error) {
	info := destination.Local()
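For orientation, a hedged sketch of how a test might combine DialPiecestore with the piecestore client's Download call; the order limit is assumed to have been issued and signed by the satellite, as in the audit changes later in this diff:

// Sketch only: `limit` is an assumed *pb.AddressedOrderLimit from the satellite.
client, err := uplink.DialPiecestore(ctx, storageNode)
if err != nil {
	return err
}

// Read `size` bytes starting at `offset` from the piece named in the limit.
downloader, err := client.Download(ctx, limit.GetLimit(), offset, size)
if err != nil {
	return err
}
defer func() { err = errs.Combine(err, downloader.Close()) }()

buf := make([]byte, size)
_, err = io.ReadFull(downloader, buf)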
@@ -197,20 +220,28 @@ func uploadStream(ctx context.Context, streams streams.Store, mutableObject stor
	return errs.Combine(err, upload.Close())
}

-// Download data from specific satellite
-func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) ([]byte, error) {
+// DownloadStream returns stream for downloading data.
+func (uplink *Uplink) DownloadStream(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (*stream.Download, error) {
	config := uplink.getConfig(satellite)
	metainfo, streams, err := config.GetMetainfo(ctx, uplink.Identity)
	if err != nil {
-		return []byte{}, err
+		return nil, err
	}

	readOnlyStream, err := metainfo.GetObjectStream(ctx, bucket, path)
	if err != nil {
-		return []byte{}, err
+		return nil, err
	}

-	download := stream.NewDownload(ctx, readOnlyStream, streams)
+	return stream.NewDownload(ctx, readOnlyStream, streams), nil
+}
+
+// Download data from specific satellite
+func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) ([]byte, error) {
+	download, err := uplink.DownloadStream(ctx, satellite, bucket, path)
+	if err != nil {
+		return []byte{}, err
+	}
	defer func() { err = errs.Combine(err, download.Close()) }()

	data, err := ioutil.ReadAll(download)
@@ -220,6 +251,16 @@ func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, b
	return data, nil
}

+// Delete data to specific satellite
+func (uplink *Uplink) Delete(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) error {
+	config := uplink.getConfig(satellite)
+	metainfo, _, err := config.GetMetainfo(ctx, uplink.Identity)
+	if err != nil {
+		return err
+	}
+	return metainfo.DeleteObject(ctx, bucket, path)
+}
+
func (uplink *Uplink) getConfig(satellite *satellite.Peer) uplink.Config {
	config := getDefaultConfig()
	config.Client.OverlayAddr = satellite.Addr()
new file: internal/teststorj/pieceid.go (19 lines)
@@ -0,0 +1,19 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package teststorj
+
+import (
+	"storj.io/storj/pkg/storj"
+)
+
+// PieceIDFromBytes converts a byte slice into a piece ID
+func PieceIDFromBytes(b []byte) storj.PieceID {
+	id, _ := storj.PieceIDFromBytes(fit(b))
+	return id
+}
+
+// PieceIDFromString decodes a hex encoded piece ID string
+func PieceIDFromString(s string) storj.PieceID {
+	return PieceIDFromBytes([]byte(s))
+}
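These helpers pad arbitrary bytes into a valid storj.PieceID, which lets tests use readable names; for example, the checker test changes later in this diff build pointers like:

// A deterministic, human-readable piece ID for a test pointer.
pointer := &pb.Pointer{
	Remote: &pb.RemoteSegment{
		RootPieceId:  teststorj.PieceIDFromString("fake-piece-id"),
		RemotePieces: pieces,
	},
}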
@@ -52,6 +52,7 @@ func New(logger *zap.Logger, accountingDB accounting.DB, bwAgreementDB bwagreeme
func (t *Tally) Run(ctx context.Context) (err error) {
	t.logger.Info("Tally service starting up")
+	defer mon.Task()(&ctx)(&err)

	for {
		if err = t.Tally(ctx); err != nil {
			t.logger.Error("Tally failed", zap.Error(err))
@@ -9,10 +9,12 @@ import (
	"math/big"
	"sync"

-	"github.com/vivint/infectious"
+	"github.com/golang/protobuf/ptypes/timestamp"

+	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/identity"
+	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/pointerdb"
	"storj.io/storj/pkg/storage/meta"
@@ -21,23 +23,31 @@ import (

// Stripe keeps track of a stripe's index and its parent segment
type Stripe struct {
-	Index   int
-	Segment *pb.Pointer
-	PBA     *pb.OrderLimit
+	Index       int64
+	Segment     *pb.Pointer
+	OrderLimits []*pb.AddressedOrderLimit
}

// Cursor keeps track of audit location in pointer db
type Cursor struct {
-	pointers   *pointerdb.Service
+	pointerdb  *pointerdb.Service
	allocation *pointerdb.AllocationSigner
+	cache      *overlay.Cache
	identity   *identity.FullIdentity
+	signer     signing.Signer
	lastPath   storj.Path
	mutex      sync.Mutex
}

// NewCursor creates a Cursor which iterates over pointer db
-func NewCursor(pointers *pointerdb.Service, allocation *pointerdb.AllocationSigner, identity *identity.FullIdentity) *Cursor {
-	return &Cursor{pointers: pointers, allocation: allocation, identity: identity}
+func NewCursor(pointerdb *pointerdb.Service, allocation *pointerdb.AllocationSigner, cache *overlay.Cache, identity *identity.FullIdentity) *Cursor {
+	return &Cursor{
+		pointerdb:  pointerdb,
+		allocation: allocation,
+		cache:      cache,
+		identity:   identity,
+		signer:     signing.SignerFromFullIdentity(identity),
+	}
}

// NextStripe returns a random stripe to be audited
@@ -49,7 +59,7 @@ func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, err error
	var path storj.Path
	var more bool

-	pointerItems, more, err = cursor.pointers.List("", cursor.lastPath, "", true, 0, meta.None)
+	pointerItems, more, err = cursor.pointerdb.List("", cursor.lastPath, "", true, 0, meta.None)
	if err != nil {
		return nil, err
	}
@@ -73,12 +83,7 @@ func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, err error
	}

	// get pointer info
-	pointer, err := cursor.pointers.Get(path)
-	if err != nil {
-		return nil, err
-	}
-	peerIdentity := &identity.PeerIdentity{ID: cursor.identity.ID, Leaf: cursor.identity.Leaf}
-	pba, err := cursor.allocation.PayerBandwidthAllocation(ctx, peerIdentity, pb.BandwidthAction_GET_AUDIT)
+	pointer, err := cursor.pointerdb.Get(path)
	if err != nil {
		return nil, err
	}
@@ -87,53 +92,44 @@ func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, err error
		return nil, nil
	}

-	// create the erasure scheme so we can get the stripe size
-	es, err := makeErasureScheme(pointer.GetRemote().GetRedundancy())
-	if err != nil {
-		return nil, err
-	}
-
	if pointer.GetSegmentSize() == 0 {
		return nil, nil
	}

-	index, err := getRandomStripe(es, pointer)
+	index, err := getRandomStripe(pointer)
	if err != nil {
		return nil, err
	}

+	limits, err := cursor.createOrderLimits(ctx, pointer)
+	if err != nil {
+		return nil, err
+	}
+
	return &Stripe{
-		Index:   index,
-		Segment: pointer,
-		PBA:     pba,
+		Index:       index,
+		Segment:     pointer,
+		OrderLimits: limits,
	}, nil
}

-func makeErasureScheme(rs *pb.RedundancyScheme) (eestream.ErasureScheme, error) {
-	required := int(rs.GetMinReq())
-	total := int(rs.GetTotal())
-
-	fc, err := infectious.NewFEC(required, total)
-	if err != nil {
-		return nil, err
-	}
-	es := eestream.NewRSScheme(fc, int(rs.GetErasureShareSize()))
-	return es, nil
-}
-
-func getRandomStripe(es eestream.ErasureScheme, pointer *pb.Pointer) (index int, err error) {
-	stripeSize := es.StripeSize()
-
+func getRandomStripe(pointer *pb.Pointer) (index int64, err error) {
+	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
+	if err != nil {
+		return 0, err
+	}
+
	// the last segment could be smaller than stripe size
-	if pointer.GetSegmentSize() < int64(stripeSize) {
+	if pointer.GetSegmentSize() < int64(redundancy.StripeSize()) {
		return 0, nil
	}

-	randomStripeIndex, err := rand.Int(rand.Reader, big.NewInt(pointer.GetSegmentSize()/int64(stripeSize)))
+	randomStripeIndex, err := rand.Int(rand.Reader, big.NewInt(pointer.GetSegmentSize()/int64(redundancy.StripeSize())))
	if err != nil {
		return -1, err
	}
-	return int(randomStripeIndex.Int64()), nil
+
+	return randomStripeIndex.Int64(), nil
}

func getRandomPointer(pointerItems []*pb.ListResponse_Item) (pointer *pb.ListResponse_Item, err error) {
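The sampled index above is uniform over the number of whole stripes in the segment. A worked example with illustrative numbers (not from the diff):

// A 10 KiB segment with a 2 KiB stripe size has 5 whole stripes,
// so the audited stripe index is uniform over [0, 5).
segmentSize := int64(10 * 1024)
stripeSize := int64(2 * 1024)
index, err := rand.Int(rand.Reader, big.NewInt(segmentSize/stripeSize))
// index.Int64() is in 0..4; crypto/rand keeps the choice unpredictable.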
@@ -141,7 +137,61 @@ func getRandomPointer(pointerItems []*pb.ListResponse_Item) (pointer *pb.ListRes
	if err != nil {
		return &pb.ListResponse_Item{}, err
	}
-	randomNumInt64 := randomNum.Int64()
-	pointerItem := pointerItems[randomNumInt64]
-	return pointerItem, nil
+
+	return pointerItems[randomNum.Int64()], nil
}

+func (cursor *Cursor) createOrderLimits(ctx context.Context, pointer *pb.Pointer) ([]*pb.AddressedOrderLimit, error) {
+	auditorIdentity := cursor.identity.PeerIdentity()
+	rootPieceID := pointer.GetRemote().RootPieceId
+	shareSize := pointer.GetRemote().GetRedundancy().GetErasureShareSize()
+	expiration := pointer.ExpirationDate
+
+	limits := make([]*pb.AddressedOrderLimit, pointer.GetRemote().GetRedundancy().GetTotal())
+	for _, piece := range pointer.GetRemote().GetRemotePieces() {
+		derivedPieceID := rootPieceID.Derive(piece.NodeId)
+		orderLimit, err := cursor.createOrderLimit(ctx, auditorIdentity, piece.NodeId, derivedPieceID, expiration, int64(shareSize), pb.PieceAction_GET_AUDIT)
+		if err != nil {
+			return nil, err
+		}
+
+		node, err := cursor.cache.Get(ctx, piece.NodeId)
+		if err != nil {
+			return nil, err
+		}
+
+		if node != nil {
+			node.Type.DPanicOnInvalid("auditor order limits")
+		}
+
+		limits[piece.GetPieceNum()] = &pb.AddressedOrderLimit{
+			Limit:              orderLimit,
+			StorageNodeAddress: node.Address,
+		}
+	}
+
+	return limits, nil
+}
+
+func (cursor *Cursor) createOrderLimit(ctx context.Context, uplinkIdentity *identity.PeerIdentity, nodeID storj.NodeID, pieceID storj.PieceID, expiration *timestamp.Timestamp, limit int64, action pb.PieceAction) (*pb.OrderLimit2, error) {
+	parameters := pointerdb.OrderLimitParameters{
+		UplinkIdentity:  uplinkIdentity,
+		StorageNodeID:   nodeID,
+		PieceID:         pieceID,
+		Action:          action,
+		PieceExpiration: expiration,
+		Limit:           limit,
+	}
+
+	orderLimit, err := cursor.allocation.OrderLimit(ctx, parameters)
+	if err != nil {
+		return nil, err
+	}
+
+	orderLimit, err = signing.SignOrderLimit(cursor.signer, orderLimit)
+	if err != nil {
+		return nil, err
+	}
+
+	return orderLimit, nil
+}
@@ -80,10 +80,11 @@ func TestAuditSegment(t *testing.T) {
		},
	}

-	pointers := planet.Satellites[0].Metainfo.Service
+	pointerdb := planet.Satellites[0].Metainfo.Service
	allocation := planet.Satellites[0].Metainfo.Allocation
+	cache := planet.Satellites[0].Overlay.Service
	// create a pdb client and instance of audit
-	cursor := audit.NewCursor(pointers, allocation, planet.Satellites[0].Identity)
+	cursor := audit.NewCursor(pointerdb, allocation, cache, planet.Satellites[0].Identity)

	// put 10 paths in db
	t.Run("putToDB", func(t *testing.T) {
@@ -95,7 +96,7 @@ func TestAuditSegment(t *testing.T) {
	putRequest := makePutRequest(tt.path)

	// put pointer into db
-	err := pointers.Put(tt.path, putRequest.Pointer)
+	err := pointerdb.Put(tt.path, putRequest.Pointer)
	if err != nil {
		t.Fatalf("failed to put %v: error: %v", putRequest.Pointer, err)
		assert1.NotNil(err)
@@ -125,7 +126,7 @@ func TestAuditSegment(t *testing.T) {

	// test to see how random paths are
	t.Run("probabilisticTest", func(t *testing.T) {
-		list, _, err := pointers.List("", "", "", true, 10, meta.None)
+		list, _, err := pointerdb.List("", "", "", true, 10, meta.None)
		require.NoError(t, err)
		require.Len(t, list, 10)

@@ -201,7 +202,7 @@ func makePutRequest(path storj.Path) pb.PutRequest {
		SuccessThreshold: 3,
		ErasureShareSize: 2,
	},
-	PieceId:      "testId",
+	RootPieceId:  teststorj.PieceIDFromString("testId"),
	RemotePieces: rps,
	},
	SegmentSize: int64(10),
@@ -37,9 +37,9 @@ type Service struct {
func NewService(log *zap.Logger, sdb statdb.DB, interval time.Duration, maxRetries int, pointers *pointerdb.Service, allocation *pointerdb.AllocationSigner, transport transport.Client, overlay *overlay.Cache, identity *identity.FullIdentity) (service *Service, err error) {
	return &Service{
		log: log,
		// TODO: instead of overlay.Client use overlay.Service
-		Cursor:   NewCursor(pointers, allocation, identity),
-		Verifier: NewVerifier(transport, overlay, identity),
+		Cursor:   NewCursor(pointers, allocation, overlay, identity),
+		Verifier: NewVerifier(log.Named("audit:verifier"), transport, overlay, identity),
		Reporter: NewReporter(sdb, maxRetries),

		ticker: time.NewTicker(interval),
@@ -50,6 +50,7 @@ func NewService(log *zap.Logger, sdb statdb.DB, interval time.Duration, maxRetri
func (service *Service) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
+	service.log.Info("Audit cron is starting up")

	for {
		err := service.process(ctx)
		if err != nil {
@@ -10,23 +10,25 @@ import (

	"github.com/vivint/infectious"
	"github.com/zeebo/errs"
+	"go.uber.org/zap"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

+	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
-	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/transport"
+	"storj.io/storj/uplink/piecestore"
)

var mon = monkit.Package()

// Share represents required information about an audited share
type Share struct {
-	Error       error
-	PieceNumber int
-	Data        []byte
+	Error    error
+	PieceNum int
+	Data     []byte
}

// Verifier helps verify the correctness of a given stripe
@@ -35,127 +37,92 @@ type Verifier struct {
}

type downloader interface {
-	DownloadShares(ctx context.Context, pointer *pb.Pointer, stripeIndex int, pba *pb.OrderLimit) (shares map[int]Share, nodes map[int]storj.NodeID, err error)
+	DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, stripeIndex int64, shareSize int32) (shares map[int]Share, nodes map[int]storj.NodeID, err error)
}

// defaultDownloader downloads shares from networked storage nodes
type defaultDownloader struct {
+	log       *zap.Logger
	transport transport.Client
	overlay   *overlay.Cache
	identity  *identity.FullIdentity
	reporter
}

// newDefaultDownloader creates a defaultDownloader
-func newDefaultDownloader(transport transport.Client, overlay *overlay.Cache, id *identity.FullIdentity) *defaultDownloader {
-	return &defaultDownloader{transport: transport, overlay: overlay, identity: id}
+func newDefaultDownloader(log *zap.Logger, transport transport.Client, overlay *overlay.Cache, id *identity.FullIdentity) *defaultDownloader {
+	return &defaultDownloader{log: log, transport: transport, overlay: overlay}
}

// NewVerifier creates a Verifier
-func NewVerifier(transport transport.Client, overlay *overlay.Cache, id *identity.FullIdentity) *Verifier {
-	return &Verifier{downloader: newDefaultDownloader(transport, overlay, id)}
+func NewVerifier(log *zap.Logger, transport transport.Client, overlay *overlay.Cache, id *identity.FullIdentity) *Verifier {
+	return &Verifier{downloader: newDefaultDownloader(log, transport, overlay, id)}
}

-// getShare use piece store clients to download shares from a given node
-func (d *defaultDownloader) getShare(ctx context.Context, stripeIndex, shareSize, pieceNumber int,
-	id psclient.PieceID, pieceSize int64, fromNode *pb.Node, pba *pb.OrderLimit) (s Share, err error) {
-	// TODO: too many arguments use a struct
+// getShare use piece store client to download shares from nodes
+func (d *defaultDownloader) getShare(ctx context.Context, limit *pb.AddressedOrderLimit, stripeIndex int64, shareSize int32, pieceNum int) (share Share, err error) {
	defer mon.Task()(&ctx)(&err)

-	if fromNode == nil {
-		// TODO(moby) perhaps we should not penalize this node's reputation if it is not returned by the overlay
-		return s, Error.New("no node returned from overlay for piece %s", id.String())
-	}
-	fromNode.Type.DPanicOnInvalid("audit getShare")
+	storageNodeID := limit.GetLimit().StorageNodeId

-	// TODO(nat): the reason for dividing by 8 is because later in psclient/readerwriter.go
-	// the bandwidthMsgSize is arbitrarily multiplied by 8 as a reasonable threshold
-	// for message trust size drift. The 8 should eventually be a config value.
-	var bandwidthMsgSize int
-	remainder := shareSize % 8
-	if remainder == 0 {
-		bandwidthMsgSize = shareSize / 8
-	} else {
-		bandwidthMsgSize = (shareSize + 8 - remainder) / 8
-	}
-
-	ps, err := psclient.NewPSClient(ctx, d.transport, fromNode, bandwidthMsgSize)
+	conn, err := d.transport.DialNode(ctx, &pb.Node{
+		Id:      storageNodeID,
+		Address: limit.GetStorageNodeAddress(),
+		Type:    pb.NodeType_STORAGE,
+	})
	if err != nil {
-		return s, err
+		return Share{}, err
	}
+	ps := piecestore.NewClient(
+		d.log.Named(storageNodeID.String()),
+		signing.SignerFromFullIdentity(d.transport.Identity()),
+		conn,
+		piecestore.DefaultConfig,
+	)

-	derivedPieceID, err := id.Derive(fromNode.Id.Bytes())
+	offset := int64(shareSize) * stripeIndex
+
+	downloader, err := ps.Download(ctx, limit.GetLimit(), offset, int64(shareSize))
	if err != nil {
-		return s, err
+		return Share{}, err
	}
-
-	rr, err := ps.Get(ctx, derivedPieceID, pieceSize, pba)
-	if err != nil {
-		return s, err
-	}
-
-	offset := shareSize * stripeIndex
-
-	rc, err := rr.Range(ctx, int64(offset), int64(shareSize))
-	if err != nil {
-		return s, err
-	}
-	defer func() { err = errs.Combine(err, rc.Close()) }()
+	defer func() { err = errs.Combine(err, downloader.Close()) }()

	buf := make([]byte, shareSize)
-	_, err = io.ReadFull(rc, buf)
+	_, err = io.ReadFull(downloader, buf)
	if err != nil {
-		return s, err
+		return Share{}, err
	}

-	s = Share{
-		Error:       nil,
-		PieceNumber: pieceNumber,
-		Data:        buf,
-	}
-	return s, nil
+	return Share{
+		Error:    nil,
+		PieceNum: pieceNum,
+		Data:     buf,
+	}, nil
}

// Download Shares downloads shares from the nodes where remote pieces are located
-func (d *defaultDownloader) DownloadShares(ctx context.Context, pointer *pb.Pointer,
-	stripeIndex int, pba *pb.OrderLimit) (shares map[int]Share, nodes map[int]storj.NodeID, err error) {
+func (d *defaultDownloader) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, stripeIndex int64, shareSize int32) (shares map[int]Share, nodes map[int]storj.NodeID, err error) {
	defer mon.Task()(&ctx)(&err)

-	var nodeIds storj.NodeIDList
-	pieces := pointer.Remote.GetRemotePieces()
+	shares = make(map[int]Share, len(limits))
+	nodes = make(map[int]storj.NodeID, len(limits))

-	for _, p := range pieces {
-		nodeIds = append(nodeIds, p.NodeId)
-	}
+	for i, limit := range limits {
+		if limit == nil {
+			continue
+		}

-	// TODO(moby) nodeSlice will not include offline nodes, so overlay should update uptime for these nodes
-	nodeSlice, err := d.overlay.GetAll(ctx, nodeIds)
-	if err != nil {
-		return nil, nodes, err
-	}
-
-	shares = make(map[int]Share, len(nodeSlice))
-	nodes = make(map[int]storj.NodeID, len(nodeSlice))
-
-	shareSize := int(pointer.Remote.Redundancy.GetErasureShareSize())
-	pieceID := psclient.PieceID(pointer.Remote.GetPieceId())
-
-	// this downloads shares from nodes at the given stripe index
-	for i, node := range nodeSlice {
-		paddedSize := calcPadded(pointer.GetSegmentSize(), shareSize)
-		pieceSize := paddedSize / int64(pointer.Remote.Redundancy.GetMinReq())
-
-		s, err := d.getShare(ctx, stripeIndex, shareSize, int(pieces[i].PieceNum), pieceID, pieceSize, node, pba)
+		share, err := d.getShare(ctx, limit, stripeIndex, shareSize, i)
		if err != nil {
-			s = Share{
-				Error:       err,
-				PieceNumber: int(pieces[i].PieceNum),
-				Data:        nil,
+			share = Share{
+				Error:    err,
+				PieceNum: i,
+				Data:     nil,
			}
		}

-		shares[s.PieceNumber] = s
-		nodes[s.PieceNumber] = nodeIds[i]
+		shares[share.PieceNum] = share
+		nodes[share.PieceNum] = limit.GetLimit().StorageNodeId
	}

	return shares, nodes, nil
@@ -170,7 +137,7 @@ func makeCopies(ctx context.Context, originals map[int]Share) (copies []infectio
	}
	copies = append(copies, infectious.Share{
		Data:   append([]byte{}, original.Data...),
-		Number: original.PieceNumber})
+		Number: original.PieceNum})
	}
	return copies, nil
}
@@ -201,19 +168,14 @@ func auditShares(ctx context.Context, required, total int, originals map[int]Sha
	return pieceNums, nil
}

-func calcPadded(size int64, blockSize int) int64 {
-	mod := size % int64(blockSize)
-	if mod == 0 {
-		return size
-	}
-	return size + int64(blockSize) - mod
-}
-
// verify downloads shares then verifies the data correctness at the given stripe
func (verifier *Verifier) verify(ctx context.Context, stripe *Stripe) (verifiedNodes *RecordAuditsInfo, err error) {
	defer mon.Task()(&ctx)(&err)

-	shares, nodes, err := verifier.downloader.DownloadShares(ctx, stripe.Segment, stripe.Index, stripe.PBA)
+	pointer := stripe.Segment
+	shareSize := pointer.GetRemote().GetRedundancy().GetErasureShareSize()
+
+	shares, nodes, err := verifier.downloader.DownloadShares(ctx, stripe.OrderLimits, stripe.Index, shareSize)
	if err != nil {
		return nil, err
	}
@@ -225,7 +187,6 @@ func (verifier *Verifier) verify(ctx context.Context, stripe *Stripe) (verifiedN
	}
	}

-	pointer := stripe.Segment
	required := int(pointer.Remote.Redundancy.GetMinReq())
	total := int(pointer.Remote.Redundancy.GetTotal())
	pieceNums, err := auditShares(ctx, required, total, shares)
new file: pkg/auth/signing/encode.go (34 lines)
@@ -0,0 +1,34 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package signing
+
+import (
+	"github.com/gogo/protobuf/proto"
+
+	"storj.io/storj/pkg/pb"
+)
+
+// EncodeOrderLimit encodes order limit into bytes for signing.
+func EncodeOrderLimit(limit *pb.OrderLimit2) ([]byte, error) {
+	signature := limit.SatelliteSignature
+	limit.SatelliteSignature = nil
+	defer func() { limit.SatelliteSignature = signature }()
+	return proto.Marshal(limit)
+}
+
+// EncodeOrder encodes order into bytes for signing.
+func EncodeOrder(order *pb.Order2) ([]byte, error) {
+	signature := order.UplinkSignature
+	order.UplinkSignature = nil
+	defer func() { order.UplinkSignature = signature }()
+	return proto.Marshal(order)
+}
+
+// EncodePieceHash encodes piece hash into bytes for signing.
+func EncodePieceHash(hash *pb.PieceHash) ([]byte, error) {
+	signature := hash.Signature
+	hash.Signature = nil
+	defer func() { hash.Signature = signature }()
+	return proto.Marshal(hash)
+}
new file: pkg/auth/signing/peers.go (62 lines)
@@ -0,0 +1,62 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package signing
+
+import (
+	"crypto"
+
+	"storj.io/storj/pkg/identity"
+	"storj.io/storj/pkg/pkcrypto"
+	"storj.io/storj/pkg/storj"
+)
+
+// PrivateKey implements a signer and signee using a crypto.PrivateKey.
+type PrivateKey struct {
+	Self storj.NodeID
+	Key  crypto.PrivateKey
+}
+
+// SignerFromFullIdentity returns signer based on full identity.
+func SignerFromFullIdentity(identity *identity.FullIdentity) Signer {
+	return &PrivateKey{
+		Self: identity.ID,
+		Key:  identity.Key,
+	}
+}
+
+// ID returns node id associated with PrivateKey.
+func (private *PrivateKey) ID() storj.NodeID { return private.Self }
+
+// HashAndSign hashes the data and signs with the used key.
+func (private *PrivateKey) HashAndSign(data []byte) ([]byte, error) {
+	return pkcrypto.HashAndSign(private.Key, data)
+}
+
+// HashAndVerifySignature hashes the data and verifies that the signature belongs to the PrivateKey.
+func (private *PrivateKey) HashAndVerifySignature(data, signature []byte) error {
+	pub := pkcrypto.PublicKeyFromPrivate(private.Key)
+	return pkcrypto.HashAndVerifySignature(pub, data, signature)
+}
+
+// PublicKey implements a signee using crypto.PublicKey.
+type PublicKey struct {
+	Self storj.NodeID
+	Key  crypto.PublicKey
+}
+
+// SigneeFromPeerIdentity returns signee based on peer identity.
+func SigneeFromPeerIdentity(identity *identity.PeerIdentity) Signee {
+	return &PublicKey{
+		Self: identity.ID,
+		Key:  identity.Leaf.PublicKey,
+	}
+}
+
+// ID returns node id associated with this PublicKey.
+func (public *PublicKey) ID() storj.NodeID { return public.Self }
+
+// HashAndVerifySignature hashes the data and verifies that the signature belongs to the PublicKey.
+func (public *PublicKey) HashAndVerifySignature(data, signature []byte) error {
+	return pkcrypto.HashAndVerifySignature(public.Key, data, signature)
+}
new file: pkg/auth/signing/sign.go (72 lines)
@@ -0,0 +1,72 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package signing
+
+import (
+	"github.com/zeebo/errs"
+
+	"storj.io/storj/pkg/pb"
+	"storj.io/storj/pkg/storj"
+)
+
+// Error is the default error class for signing package.
+var Error = errs.Class("signing")
+
+// Signer is able to sign data and verify own signature belongs.
+type Signer interface {
+	ID() storj.NodeID
+	HashAndSign(data []byte) ([]byte, error)
+	HashAndVerifySignature(data, signature []byte) error
+}
+
+// SignOrderLimit signs the order limit using the specified signer.
+// Signer is a satellite.
+func SignOrderLimit(satellite Signer, unsigned *pb.OrderLimit2) (*pb.OrderLimit2, error) {
+	bytes, err := EncodeOrderLimit(unsigned)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	signed := *unsigned
+	signed.SatelliteSignature, err = satellite.HashAndSign(bytes)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	return &signed, nil
+}
+
+// SignOrder signs the order using the specified signer.
+// Signer is an uplink.
+func SignOrder(uplink Signer, unsigned *pb.Order2) (*pb.Order2, error) {
+	bytes, err := EncodeOrder(unsigned)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	signed := *unsigned
+	signed.UplinkSignature, err = uplink.HashAndSign(bytes)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	return &signed, nil
+}
+
+// SignPieceHash signs the piece hash using the specified signer.
+// Signer is either uplink or storage node.
+func SignPieceHash(signer Signer, unsigned *pb.PieceHash) (*pb.PieceHash, error) {
+	bytes, err := EncodePieceHash(unsigned)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	signed := *unsigned
+	signed.Signature, err = signer.HashAndSign(bytes)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	return &signed, nil
+}
new file: pkg/auth/signing/verify.go (45 lines)
@@ -0,0 +1,45 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package signing
+
+import (
+	"storj.io/storj/pkg/pb"
+	"storj.io/storj/pkg/storj"
+)
+
+// Signee is able to verify that the data signature belongs to the signee.
+type Signee interface {
+	ID() storj.NodeID
+	HashAndVerifySignature(data, signature []byte) error
+}
+
+// VerifyOrderLimitSignature verifies that the signature inside order limit belongs to the satellite.
+func VerifyOrderLimitSignature(satellite Signee, signed *pb.OrderLimit2) error {
+	bytes, err := EncodeOrderLimit(signed)
+	if err != nil {
+		return Error.Wrap(err)
+	}
+
+	return satellite.HashAndVerifySignature(bytes, signed.SatelliteSignature)
+}
+
+// VerifyOrderSignature verifies that the signature inside order belongs to the uplink.
+func VerifyOrderSignature(uplink Signee, signed *pb.Order2) error {
+	bytes, err := EncodeOrder(signed)
+	if err != nil {
+		return Error.Wrap(err)
+	}
+
+	return uplink.HashAndVerifySignature(bytes, signed.UplinkSignature)
+}
+
+// VerifyPieceHashSignature verifies that the signature inside piece hash belongs to the signer, which is either uplink or storage node.
+func VerifyPieceHashSignature(signee Signee, signed *pb.PieceHash) error {
+	bytes, err := EncodePieceHash(signed)
+	if err != nil {
+		return Error.Wrap(err)
+	}
+
+	return signee.HashAndVerifySignature(bytes, signed.Signature)
+}
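Together these three files give a sign-then-verify round trip: the Encode* helpers blank the signature field before marshaling, so signer and verifier hash identical bytes. A minimal sketch using only APIs added in this commit (satelliteIdent is an assumed *identity.FullIdentity from a test fixture, unsignedLimit an assumed *pb.OrderLimit2):

signer := signing.SignerFromFullIdentity(satelliteIdent)

signed, err := signing.SignOrderLimit(signer, unsignedLimit) // fills SatelliteSignature
if err != nil {
	return err
}

// A peer needs only the public half of the identity to verify.
signee := signing.SigneeFromPeerIdentity(satelliteIdent.PeerIdentity())
if err := signing.VerifyOrderLimitSignature(signee, signed); err != nil {
	return err // tampered bytes or wrong key
}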
@@ -13,6 +13,7 @@ import (

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
+	"storj.io/storj/internal/teststorj"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
@@ -115,14 +116,14 @@ func TestIdentifyIrreparableSegments(t *testing.T) {
		MinReq:          int32(4),
		RepairThreshold: int32(8),
	},
-	PieceId:      "fake-piece-id",
+	RootPieceId:  teststorj.PieceIDFromString("fake-piece-id"),
	RemotePieces: pieces,
	},
	}

	// put test pointer to db
	pointerdb := planet.Satellites[0].Metainfo.Service
-	err := pointerdb.Put(pointer.Remote.PieceId, pointer)
+	err := pointerdb.Put("fake-piece-id", pointer)
	assert.NoError(t, err)

	err = checker.IdentifyInjuredSegments(ctx)
@@ -186,12 +187,12 @@ func makePointer(t *testing.T, planet *testplanet.Planet, pieceID string, create
		MinReq:          int32(minReq),
		RepairThreshold: int32(repairThreshold),
	},
-	PieceId:      pieceID,
+	RootPieceId:  teststorj.PieceIDFromString(pieceID),
	RemotePieces: pieces,
	},
	}
	// put test pointer to db
	pointerdb := planet.Satellites[0].Metainfo.Service
-	err := pointerdb.Put(pointer.Remote.PieceId, pointer)
+	err := pointerdb.Put(pieceID, pointer)
	require.NoError(t, err)
}
@@ -19,12 +19,12 @@ func NewInspector(irrdb DB) *Inspector {
	return &Inspector{irrdb: irrdb}
}

-// ListSegments returns a number of irreparable segments by limit and offset
-func (srv *Inspector) ListSegments(ctx context.Context, req *pb.ListSegmentsRequest) (*pb.ListSegmentsResponse, error) {
+// ListIrreparableSegments returns a number of irreparable segments by limit and offset
+func (srv *Inspector) ListIrreparableSegments(ctx context.Context, req *pb.ListIrreparableSegmentsRequest) (*pb.ListIrreparableSegmentsResponse, error) {
	segments, err := srv.irrdb.GetLimited(ctx, int(req.GetLimit()), int64(req.GetOffset()))
	if err != nil {
		return nil, err
	}

-	return &pb.ListSegmentsResponse{Segments: segments}, err
+	return &pb.ListIrreparableSegmentsResponse{Segments: segments}, err
}
@@ -8,8 +8,9 @@ import (
	"time"

	"storj.io/storj/internal/memory"
+	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/overlay"
-	"storj.io/storj/pkg/pointerdb/pdbclient"
+	"storj.io/storj/pkg/pointerdb"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storage/segments"
	"storj.io/storj/pkg/transport"
@@ -17,29 +18,16 @@ import (

// Config contains configurable values for repairer
type Config struct {
-	MaxRepair     int           `help:"maximum segments that can be repaired concurrently" default:"100"`
-	Interval      time.Duration `help:"how frequently checker should audit segments" default:"3600s"`
-	OverlayAddr   string        `help:"Address to contact overlay server through"`
-	PointerDBAddr string        `help:"Address to contact pointerdb server through"`
-	MaxBufferMem  memory.Size   `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4M"`
-	APIKey        string        `help:"repairer-specific pointerdb access credential"`
+	MaxRepair    int           `help:"maximum segments that can be repaired concurrently" default:"100"`
+	Interval     time.Duration `help:"how frequently checker should audit segments" default:"3600s"`
+	MaxBufferMem memory.Size   `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4M"`
}

// GetSegmentRepairer creates a new segment repairer from storeConfig values
-func (c Config) GetSegmentRepairer(ctx context.Context, tc transport.Client) (ss SegmentRepairer, err error) {
+func (c Config) GetSegmentRepairer(ctx context.Context, tc transport.Client, pointerdb *pointerdb.Service, allocation *pointerdb.AllocationSigner, cache *overlay.Cache, identity *identity.FullIdentity, selectionPreferences *overlay.NodeSelectionConfig) (ss SegmentRepairer, err error) {
	defer mon.Task()(&ctx)(&err)

-	var oc overlay.Client
-	oc, err = overlay.NewClientContext(ctx, tc, c.OverlayAddr)
-	if err != nil {
-		return nil, err
-	}
-
-	pdb, err := pdbclient.NewClientContext(ctx, tc, c.PointerDBAddr, c.APIKey)
-	if err != nil {
-		return nil, err
-	}
-
	ec := ecclient.NewClient(tc, c.MaxBufferMem.Int())
-	return segments.NewSegmentRepairer(oc, ec, pdb), nil
+
+	return segments.NewSegmentRepairer(pointerdb, allocation, cache, ec, identity, selectionPreferences), nil
}
@@ -10,7 +10,10 @@ import (
	"go.uber.org/zap"

	"storj.io/storj/internal/sync2"
+	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/datarepair/queue"
+	"storj.io/storj/pkg/overlay"
+	"storj.io/storj/pkg/pointerdb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/storage"
@@ -23,22 +26,30 @@ type SegmentRepairer interface {

// Service contains the information needed to run the repair service
type Service struct {
-	queue     queue.RepairQueue
-	config    *Config
-	transport transport.Client
-	repairer  SegmentRepairer
-	limiter   *sync2.Limiter
-	ticker    *time.Ticker
+	queue                queue.RepairQueue
+	config               *Config
+	limiter              *sync2.Limiter
+	ticker               *time.Ticker
+	transport            transport.Client
+	pointerdb            *pointerdb.Service
+	allocation           *pointerdb.AllocationSigner
+	cache                *overlay.Cache
+	selectionPreferences *overlay.NodeSelectionConfig
+	repairer             SegmentRepairer
}

// NewService creates repairing service
-func NewService(queue queue.RepairQueue, config *Config, transport transport.Client, interval time.Duration, concurrency int) *Service {
+func NewService(queue queue.RepairQueue, config *Config, interval time.Duration, concurrency int, transport transport.Client, pointerdb *pointerdb.Service, allocation *pointerdb.AllocationSigner, cache *overlay.Cache, signer signing.Signer, selectionPreferences *overlay.NodeSelectionConfig) *Service {
	return &Service{
-		queue:     queue,
-		config:    config,
-		transport: transport,
-		limiter:   sync2.NewLimiter(concurrency),
-		ticker:    time.NewTicker(interval),
+		queue:                queue,
+		config:               config,
+		limiter:              sync2.NewLimiter(concurrency),
+		ticker:               time.NewTicker(interval),
+		transport:            transport,
+		pointerdb:            pointerdb,
+		allocation:           allocation,
+		cache:                cache,
+		selectionPreferences: selectionPreferences,
	}
}

@@ -50,7 +61,15 @@ func (service *Service) Run(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO: close segment repairer, currently this leaks connections
-	service.repairer, err = service.config.GetSegmentRepairer(ctx, service.transport)
+	service.repairer, err = service.config.GetSegmentRepairer(
+		ctx,
+		service.transport,
+		service.pointerdb,
+		service.allocation,
+		service.cache,
+		service.transport.Identity(),
+		service.selectionPreferences,
+	)
	if err != nil {
		return err
	}
@@ -9,11 +9,13 @@ import (
	"io/ioutil"
	"os"

+	"github.com/vivint/infectious"
	"go.uber.org/zap"

	"storj.io/storj/internal/readcloser"
	"storj.io/storj/internal/sync2"
	"storj.io/storj/pkg/encryption"
+	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/ranger"
)

@@ -91,6 +93,17 @@ func NewRedundancyStrategy(es ErasureScheme, repairThreshold, optimalThreshold i
	return RedundancyStrategy{ErasureScheme: es, repairThreshold: repairThreshold, optimalThreshold: optimalThreshold}, nil
}

+// NewRedundancyStrategyFromProto creates new RedundancyStrategy from the given
+// RedundancyScheme protobuf.
+func NewRedundancyStrategyFromProto(scheme *pb.RedundancyScheme) (RedundancyStrategy, error) {
+	fc, err := infectious.NewFEC(int(scheme.GetMinReq()), int(scheme.GetTotal()))
+	if err != nil {
+		return RedundancyStrategy{}, Error.Wrap(err)
+	}
+	es := NewRSScheme(fc, int(scheme.GetErasureShareSize()))
+	return NewRedundancyStrategy(es, int(scheme.GetRepairThreshold()), int(scheme.GetSuccessThreshold()))
+}
+
// RepairThreshold is the number of available erasure pieces below which
// the data must be repaired to avoid loss
func (rs *RedundancyStrategy) RepairThreshold() int {
@@ -250,3 +263,17 @@ func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) ([]io.
	}
	return readers, nil
}

+// CalcPieceSize calculates what would be the piece size of the encoded data
+// after erasure coding data with dataSize using the given ErasureScheme.
+func CalcPieceSize(dataSize int64, scheme ErasureScheme) int64 {
+	stripes := dataSize / int64(scheme.StripeSize())
+	if dataSize%int64(scheme.StripeSize()) != 0 {
+		stripes++
+	}
+
+	encodedSize := stripes * int64(scheme.StripeSize())
+	pieceSize := encodedSize / int64(scheme.RequiredCount())
+
+	return pieceSize
+}
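CalcPieceSize rounds the data up to whole stripes before dividing among the required pieces. A worked example with illustrative numbers:

// stripeSize = 256, requiredCount = 4, dataSize = 1000 (illustrative only)
stripes := int64(1000) / 256 // 3 whole stripes
if 1000%256 != 0 {
	stripes++ // 4: the partial stripe is padded to a full one
}
encodedSize := stripes * 256 // 1024 bytes per stripe column
pieceSize := encodedSize / 4 // 256 bytes stored on each required piece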
@@ -125,3 +125,22 @@ func DeriveKey(key *storj.Key, message string) (*storj.Key, error) {

	return derived, nil
}

+// CalcEncryptedSize calculates what would be the size of the cipher data after
+// encrypting data with dataSize using a Transformer with the given encryption
+// scheme.
+func CalcEncryptedSize(dataSize int64, scheme storj.EncryptionScheme) (int64, error) {
+	transformer, err := NewEncrypter(scheme.Cipher, new(storj.Key), new(storj.Nonce), int(scheme.BlockSize))
+	if err != nil {
+		return 0, err
+	}
+
+	blocks := dataSize / int64(transformer.InBlockSize())
+	if dataSize%int64(transformer.InBlockSize()) != 0 {
+		blocks++
+	}
+
+	encryptedSize := blocks * int64(transformer.OutBlockSize())
+
+	return encryptedSize, nil
+}
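CalcEncryptedSize applies the same ceiling division per encryption block, then scales by the output block size, which exceeds the input block size by the cipher's per-block overhead. With illustrative numbers:

// inBlock = 1024 plaintext bytes, outBlock = 1040 cipher bytes
// (e.g. a 16-byte per-block authentication tag); dataSize = 3000.
blocks := int64(3000) / 1024 // 2
if 3000%1024 != 0 {
	blocks++ // 3: the final partial block still costs a full cipher block
}
encryptedSize := blocks * 1040 // 3120 bytes on the wire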
@@ -12,6 +12,7 @@ import (
	"storj.io/storj/satellite/console"

	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
	"github.com/vivint/infectious"

	"storj.io/storj/internal/memory"
@@ -208,9 +209,7 @@ func TestListBuckets(t *testing.T) {

	for _, name := range bucketNames {
		_, err := db.CreateBucket(ctx, name, nil)
-		if !assert.NoError(t, err) {
-			return
-		}
+		require.NoError(t, err)
	}

	for i, tt := range []struct {
@@ -316,18 +315,14 @@ func runTest(t *testing.T, test func(context.Context, *testplanet.Planet, *kvmet
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 4, 1)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	db, buckets, streams, err := newMetainfoParts(planet)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	test(ctx, planet, db, buckets, streams)
}
@@ -362,12 +357,12 @@ func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, buckets.Store,
		return nil, nil, nil, err
	}

-	oc, err := planet.Uplinks[0].DialOverlay(planet.Satellites[0])
+	pdb, err := planet.Uplinks[0].DialPointerDB(planet.Satellites[0], TestAPIKey)
	if err != nil {
		return nil, nil, nil, err
	}

-	pdb, err := planet.Uplinks[0].DialPointerDB(planet.Satellites[0], TestAPIKey)
+	metainfo, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], TestAPIKey)
	if err != nil {
		return nil, nil, nil, err
	}
@@ -378,17 +373,17 @@ func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, buckets.Store,
		return nil, nil, nil, err
	}

-	rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, 1*memory.KB.Int()), 3, 4)
+	rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, 1*memory.KiB.Int()), 0, 0)
	if err != nil {
		return nil, nil, nil, err
	}

-	segments := segments.NewSegmentStore(oc, ec, pdb, rs, 8*memory.KB.Int())
+	segments := segments.NewSegmentStore(metainfo, ec, rs, 8*memory.KiB.Int(), 8*memory.MiB.Int64())

	key := new(storj.Key)
	copy(key[:], TestEncKey)

-	streams, err := streams.NewStreamStore(segments, 64*memory.MB.Int64(), key, 1*memory.KB.Int(), storj.AESGCM)
+	streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), key, 1*memory.KiB.Int(), storj.AESGCM)
	if err != nil {
		return nil, nil, nil, err
	}
@@ -49,7 +49,7 @@ func New(buckets buckets.Store, streams streams.Store, segments segments.Store,
func (db *DB) Limits() (storj.MetainfoLimits, error) {
	return storj.MetainfoLimits{
		ListLimit: storage.LookupLimit,
-		MinimumRemoteSegmentSize: int64(memory.KB), // TODO: is this needed here?
-		MaximumInlineSegmentSize: int64(memory.MB),
+		MinimumRemoteSegmentSize: memory.KiB.Int64(), // TODO: is this needed here?
+		MaximumInlineSegmentSize: memory.MiB.Int64(),
	}, nil
}
@@ -36,13 +36,13 @@ var DefaultRS = storj.RedundancyScheme{
	RepairShares:  30,
	OptimalShares: 40,
	TotalShares:   50,
-	ShareSize:     1 * memory.KB.Int32(),
+	ShareSize:     1 * memory.KiB.Int32(),
}

// DefaultES default values for EncryptionScheme
var DefaultES = storj.EncryptionScheme{
	Cipher:    storj.AESGCM,
-	BlockSize: 1 * memory.KB.Int32(),
+	BlockSize: 1 * memory.KiB.Int32(),
}

// GetObject returns information about an object
@@ -11,6 +11,7 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testplanet"
@@ -30,19 +31,17 @@ func TestCreateObject(t *testing.T) {
	RepairShares:  35,
	OptimalShares: 80,
	TotalShares:   95,
-	ShareSize:     2 * memory.KB.Int32(),
+	ShareSize:     2 * memory.KiB.Int32(),
	}

	customES := storj.EncryptionScheme{
		Cipher:    storj.Unencrypted,
-		BlockSize: 1 * memory.KB.Int32(),
+		BlockSize: 1 * memory.KiB.Int32(),
	}

	runTest(t, func(ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
		bucket, err := db.CreateBucket(ctx, TestBucket, nil)
-		if !assert.NoError(t, err) {
-			return
-		}
+		require.NoError(t, err)

		for i, tt := range []struct {
			create *storj.CreateObject
@@ -70,9 +69,7 @@ func TestCreateObject(t *testing.T) {
	errTag := fmt.Sprintf("%d. %+v", i, tt)

	obj, err := db.CreateObject(ctx, bucket.Name, TestFile, tt.create)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	info := obj.Info()
@@ -89,10 +86,7 @@ func TestCreateObject(t *testing.T) {
func TestGetObject(t *testing.T) {
	runTest(t, func(ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
		bucket, err := db.CreateBucket(ctx, TestBucket, nil)
-		if !assert.NoError(t, err) {
-			return
-		}
-
+		require.NoError(t, err)
		upload(ctx, t, db, streams, bucket, TestFile, nil)

		_, err = db.GetObject(ctx, "", "")
@@ -118,16 +112,12 @@ func TestGetObject(t *testing.T) {

func TestGetObjectStream(t *testing.T) {
	runTest(t, func(ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
-		data := make([]byte, 32*memory.KB)
+		data := make([]byte, 32*memory.KiB)
		_, err := rand.Read(data)
-		if !assert.NoError(t, err) {
-			return
-		}
+		require.NoError(t, err)

		bucket, err := db.CreateBucket(ctx, TestBucket, nil)
-		if !assert.NoError(t, err) {
-			return
-		}
+		require.NoError(t, err)

		upload(ctx, t, db, streams, bucket, "empty-file", nil)
		upload(ctx, t, db, streams, bucket, "small-file", []byte("test"))
@@ -147,7 +137,7 @@ func TestGetObjectStream(t *testing.T) {

	assertStream(ctx, t, db, streams, bucket, "empty-file", 0, []byte{})
	assertStream(ctx, t, db, streams, bucket, "small-file", 4, []byte("test"))
-	assertStream(ctx, t, db, streams, bucket, "large-file", 32*memory.KB.Int64(), data)
+	assertStream(ctx, t, db, streams, bucket, "large-file", 32*memory.KiB.Int64(), data)

	/* TODO: Disable stopping due to flakiness.
	// Stop randomly half of the storage nodes and remove them from satellite's overlay cache
@@ -158,54 +148,40 @@ func TestGetObjectStream(t *testing.T) {
	}

	// try downloading the large file again
-	assertStream(ctx, t, db, streams, bucket, "large-file", 32*memory.KB.Int64(), data)
+	assertStream(ctx, t, db, streams, bucket, "large-file", 32*memory.KiB.Int64(), data)
	*/
	})
}

func upload(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams streams.Store, bucket storj.Bucket, path storj.Path, data []byte) {
	obj, err := db.CreateObject(ctx, bucket.Name, path, nil)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	str, err := obj.CreateStream(ctx)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	upload := stream.NewUpload(ctx, str, streams)

	_, err = upload.Write(data)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	err = upload.Close()
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	err = obj.Commit(ctx)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)
}

func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams streams.Store, bucket storj.Bucket, path storj.Path, size int64, content []byte) {
	readOnly, err := db.GetObjectStream(ctx, bucket.Name, path)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	assert.Equal(t, path, readOnly.Info().Path)
	assert.Equal(t, TestBucket, readOnly.Info().Bucket.Name)
	assert.Equal(t, storj.AESGCM, readOnly.Info().Bucket.PathCipher)

	segments, more, err := readOnly.Segments(ctx, 0, 0)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	assert.False(t, more)
	if !assert.Equal(t, 1, len(segments)) {
@@ -214,7 +190,7 @@ func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams

	assert.EqualValues(t, 0, segments[0].Index)
	assert.EqualValues(t, len(content), segments[0].Size)
-	if segments[0].Size > 4*memory.KB.Int64() {
+	if segments[0].Size > 4*memory.KiB.Int64() {
		assertRemoteSegment(t, segments[0])
	} else {
		assertInlineSegment(t, segments[0], content)
@@ -228,9 +204,7 @@ func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams

	data := make([]byte, len(content))
	n, err := io.ReadFull(download, data)
-	if !assert.NoError(t, err) {
-		return
-	}
+	require.NoError(t, err)

	assert.Equal(t, len(content), n)
	assert.Equal(t, content, data)
@@ -238,7 +212,7 @@ func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams

func assertInlineSegment(t *testing.T, segment storj.Segment, content []byte) {
	assert.Equal(t, content, segment.Inline)
-	assert.Nil(t, segment.PieceID)
+	assert.True(t, segment.PieceID.IsZero())
	assert.Equal(t, 0, len(segment.Pieces))
}

@@ -293,9 +267,7 @@ func TestDeleteObject(t *testing.T) {
func TestListObjectsEmpty(t *testing.T) {
	runTest(t, func(ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
		bucket, err := db.CreateBucket(ctx, TestBucket, nil)
-		if !assert.NoError(t, err) {
-			return
-		}
+		require.NoError(t, err)

		_, err = db.ListObjects(ctx, "", storj.ListOptions{})
		assert.True(t, storj.ErrNoBucket.Has(err))
@@ -321,9 +293,7 @@ func TestListObjectsEmpty(t *testing.T) {
func TestListObjects(t *testing.T) {
	runTest(t, func(ctx context.Context, planet *testplanet.Planet, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
		bucket, err := db.CreateBucket(ctx, TestBucket, &storj.Bucket{PathCipher: storj.Unencrypted})
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
filePaths := []string{
|
||||
"a", "aa", "b", "bb", "c",
|
||||
@ -336,9 +306,7 @@ func TestListObjects(t *testing.T) {
|
||||
}
|
||||
|
||||
otherBucket, err := db.CreateBucket(ctx, "otherbucket", nil)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
upload(ctx, t, db, streams, otherBucket, "file-in-other-bucket", nil)
|
||||
|
||||
|
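
Throughout these test hunks the sizes move from memory.KB to memory.KiB, making the binary (1024-based) units explicit. As a hedged illustration only (the real constants live in storj.io/storj/internal/memory and their definitions may differ), size constants of this shape are conventionally declared like so:

// Illustrative sketch, not the internal/memory implementation:
// binary size units as typically defined in Go.
package main

import "fmt"

type Size int64

const (
	B   Size = 1
	KiB      = B << 10 // 1024 bytes
	MiB      = KiB << 10
)

func (s Size) Int() int     { return int(s) }
func (s Size) Int32() int32 { return int32(s) }
func (s Size) Int64() int64 { return int64(s) }

func main() {
	fmt.Println(32 * KiB.Int64()) // 32768, the "large-file" size used above
}
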
@ -88,7 +88,7 @@ func (stream *readonlyStream) segment(ctx context.Context, index int64) (segment
if pointer.GetType() == pb.Pointer_INLINE {
segment.Inline, err = encryption.Decrypt(pointer.InlineSegment, stream.info.EncryptionScheme.Cipher, contentKey, nonce)
} else {
segment.PieceID = storj.PieceID(pointer.Remote.PieceId)
segment.PieceID = pointer.Remote.RootPieceId
segment.Pieces = make([]storj.Piece, 0, len(pointer.Remote.RemotePieces))
for _, piece := range pointer.Remote.RemotePieces {
var nodeID storj.NodeID
@ -686,12 +686,12 @@ func initEnv(planet *testplanet.Planet) (minio.ObjectLayer, storj.Metainfo, stre
return nil, nil, nil, err
}

oc, err := planet.Uplinks[0].DialOverlay(planet.Satellites[0])
pdb, err := planet.Uplinks[0].DialPointerDB(planet.Satellites[0], TestAPIKey)
if err != nil {
return nil, nil, nil, err
}

pdb, err := planet.Uplinks[0].DialPointerDB(planet.Satellites[0], TestAPIKey)
metainfo, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], TestAPIKey)
if err != nil {
return nil, nil, nil, err
}
@ -702,32 +702,32 @@ func initEnv(planet *testplanet.Planet) (minio.ObjectLayer, storj.Metainfo, stre
return nil, nil, nil, err
}

rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, 1*memory.KB.Int()), 3, 4)
rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, 1*memory.KiB.Int()), 3, 4)
if err != nil {
return nil, nil, nil, err
}

segments := segments.NewSegmentStore(oc, ec, pdb, rs, 8*memory.KB.Int())
segments := segments.NewSegmentStore(metainfo, ec, rs, 4*memory.KiB.Int(), 8*memory.MiB.Int64())

key := new(storj.Key)
copy(key[:], TestEncKey)

streams, err := streams.NewStreamStore(segments, 64*memory.MB.Int64(), key, 1*memory.KB.Int(), storj.AESGCM)
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), key, 1*memory.KiB.Int(), storj.AESGCM)
if err != nil {
return nil, nil, nil, err
}

buckets := buckets.NewStore(streams)

metainfo := kvmetainfo.New(buckets, streams, segments, pdb, key)
kvmetainfo := kvmetainfo.New(buckets, streams, segments, pdb, key)

gateway := NewStorjGateway(
metainfo,
kvmetainfo,
streams,
storj.AESGCM,
storj.EncryptionScheme{
Cipher: storj.AESGCM,
BlockSize: 1 * memory.KB.Int32(),
BlockSize: 1 * memory.KiB.Int32(),
},
storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
@ -741,7 +741,7 @@ func initEnv(planet *testplanet.Planet) (minio.ObjectLayer, storj.Metainfo, stre

layer, err := gateway.NewGatewayLayer(auth.Credentials{})

return layer, metainfo, streams, err
return layer, kvmetainfo, streams, err
}

func createFile(ctx context.Context, metainfo storj.Metainfo, streams streams.Store, bucket string, path storj.Path, createInfo *storj.CreateObject, data []byte) (storj.Object, error) {
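
Two of the hunks above switch storj.PieceID to value semantics: the readonly stream assigns pointer.Remote.RootPieceId directly, and the inline-segment assertion becomes segment.PieceID.IsZero() instead of assert.Nil. A minimal sketch of why, with a hypothetical array-backed ID type (the real storj.PieceID may differ in size and helpers):

// PieceID here is a hypothetical stand-in for an array-backed identifier;
// such a value type can never be nil, so "unset" means the zero value.
package main

import "fmt"

type PieceID [32]byte

// IsZero reports whether the ID holds the zero value.
func (id PieceID) IsZero() bool { return id == PieceID{} }

func main() {
	var id PieceID
	fmt.Println(id.IsZero()) // true: compared against zero, never nil
}
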
@ -3,14 +3,15 @@

package pb

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import duration "github.com/golang/protobuf/ptypes/duration"

import (
context "context"
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
duration "github.com/golang/protobuf/ptypes/duration"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
@ -25,7 +26,7 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

// ListSegments
type ListSegmentsRequest struct {
type ListIrreparableSegmentsRequest struct {
Limit int32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -33,38 +34,38 @@ type ListSegmentsRequest struct {
XXX_sizecache int32 `json:"-"`
}

func (m *ListSegmentsRequest) Reset() { *m = ListSegmentsRequest{} }
func (m *ListSegmentsRequest) String() string { return proto.CompactTextString(m) }
func (*ListSegmentsRequest) ProtoMessage() {}
func (*ListSegmentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{0}
func (m *ListIrreparableSegmentsRequest) Reset() { *m = ListIrreparableSegmentsRequest{} }
func (m *ListIrreparableSegmentsRequest) String() string { return proto.CompactTextString(m) }
func (*ListIrreparableSegmentsRequest) ProtoMessage() {}
func (*ListIrreparableSegmentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_996d82db2c1f448a, []int{0}
}
func (m *ListSegmentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListSegmentsRequest.Unmarshal(m, b)
func (m *ListIrreparableSegmentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListIrreparableSegmentsRequest.Unmarshal(m, b)
}
func (m *ListSegmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListSegmentsRequest.Marshal(b, m, deterministic)
func (m *ListIrreparableSegmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListIrreparableSegmentsRequest.Marshal(b, m, deterministic)
}
func (m *ListSegmentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListSegmentsRequest.Merge(m, src)
func (dst *ListIrreparableSegmentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListIrreparableSegmentsRequest.Merge(dst, src)
}
func (m *ListSegmentsRequest) XXX_Size() int {
return xxx_messageInfo_ListSegmentsRequest.Size(m)
func (m *ListIrreparableSegmentsRequest) XXX_Size() int {
return xxx_messageInfo_ListIrreparableSegmentsRequest.Size(m)
}
func (m *ListSegmentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListSegmentsRequest.DiscardUnknown(m)
func (m *ListIrreparableSegmentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListIrreparableSegmentsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListSegmentsRequest proto.InternalMessageInfo
var xxx_messageInfo_ListIrreparableSegmentsRequest proto.InternalMessageInfo

func (m *ListSegmentsRequest) GetLimit() int32 {
func (m *ListIrreparableSegmentsRequest) GetLimit() int32 {
if m != nil {
return m.Limit
}
return 0
}

func (m *ListSegmentsRequest) GetOffset() int32 {
func (m *ListIrreparableSegmentsRequest) GetOffset() int32 {
if m != nil {
return m.Offset
}
@ -86,7 +87,7 @@ func (m *IrreparableSegment) Reset() { *m = IrreparableSegment{} }
func (m *IrreparableSegment) String() string { return proto.CompactTextString(m) }
func (*IrreparableSegment) ProtoMessage() {}
func (*IrreparableSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{1}
return fileDescriptor_inspector_996d82db2c1f448a, []int{1}
}
func (m *IrreparableSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IrreparableSegment.Unmarshal(m, b)
@ -94,8 +95,8 @@ func (m *IrreparableSegment) XXX_Unmarshal(b []byte) error {
func (m *IrreparableSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IrreparableSegment.Marshal(b, m, deterministic)
}
func (m *IrreparableSegment) XXX_Merge(src proto.Message) {
xxx_messageInfo_IrreparableSegment.Merge(m, src)
func (dst *IrreparableSegment) XXX_Merge(src proto.Message) {
xxx_messageInfo_IrreparableSegment.Merge(dst, src)
}
func (m *IrreparableSegment) XXX_Size() int {
return xxx_messageInfo_IrreparableSegment.Size(m)
@ -141,38 +142,38 @@ func (m *IrreparableSegment) GetRepairAttemptCount() int64 {
return 0
}

type ListSegmentsResponse struct {
type ListIrreparableSegmentsResponse struct {
Segments []*IrreparableSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *ListSegmentsResponse) Reset() { *m = ListSegmentsResponse{} }
func (m *ListSegmentsResponse) String() string { return proto.CompactTextString(m) }
func (*ListSegmentsResponse) ProtoMessage() {}
func (*ListSegmentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{2}
func (m *ListIrreparableSegmentsResponse) Reset() { *m = ListIrreparableSegmentsResponse{} }
func (m *ListIrreparableSegmentsResponse) String() string { return proto.CompactTextString(m) }
func (*ListIrreparableSegmentsResponse) ProtoMessage() {}
func (*ListIrreparableSegmentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_inspector_996d82db2c1f448a, []int{2}
}
func (m *ListSegmentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListSegmentsResponse.Unmarshal(m, b)
func (m *ListIrreparableSegmentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListIrreparableSegmentsResponse.Unmarshal(m, b)
}
func (m *ListSegmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListSegmentsResponse.Marshal(b, m, deterministic)
func (m *ListIrreparableSegmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListIrreparableSegmentsResponse.Marshal(b, m, deterministic)
}
func (m *ListSegmentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListSegmentsResponse.Merge(m, src)
func (dst *ListIrreparableSegmentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListIrreparableSegmentsResponse.Merge(dst, src)
}
func (m *ListSegmentsResponse) XXX_Size() int {
return xxx_messageInfo_ListSegmentsResponse.Size(m)
func (m *ListIrreparableSegmentsResponse) XXX_Size() int {
return xxx_messageInfo_ListIrreparableSegmentsResponse.Size(m)
}
func (m *ListSegmentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListSegmentsResponse.DiscardUnknown(m)
func (m *ListIrreparableSegmentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListIrreparableSegmentsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ListSegmentsResponse proto.InternalMessageInfo
var xxx_messageInfo_ListIrreparableSegmentsResponse proto.InternalMessageInfo

func (m *ListSegmentsResponse) GetSegments() []*IrreparableSegment {
func (m *ListIrreparableSegmentsResponse) GetSegments() []*IrreparableSegment {
if m != nil {
return m.Segments
}
@ -191,7 +192,7 @@ func (m *GetStatsRequest) Reset() { *m = GetStatsRequest{} }
func (m *GetStatsRequest) String() string { return proto.CompactTextString(m) }
func (*GetStatsRequest) ProtoMessage() {}
func (*GetStatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{3}
return fileDescriptor_inspector_996d82db2c1f448a, []int{3}
}
func (m *GetStatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetStatsRequest.Unmarshal(m, b)
@ -199,8 +200,8 @@ func (m *GetStatsRequest) XXX_Unmarshal(b []byte) error {
func (m *GetStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetStatsRequest.Marshal(b, m, deterministic)
}
func (m *GetStatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetStatsRequest.Merge(m, src)
func (dst *GetStatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetStatsRequest.Merge(dst, src)
}
func (m *GetStatsRequest) XXX_Size() int {
return xxx_messageInfo_GetStatsRequest.Size(m)
@ -225,7 +226,7 @@ func (m *GetStatsResponse) Reset() { *m = GetStatsResponse{} }
func (m *GetStatsResponse) String() string { return proto.CompactTextString(m) }
func (*GetStatsResponse) ProtoMessage() {}
func (*GetStatsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{4}
return fileDescriptor_inspector_996d82db2c1f448a, []int{4}
}
func (m *GetStatsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetStatsResponse.Unmarshal(m, b)
@ -233,8 +234,8 @@ func (m *GetStatsResponse) XXX_Unmarshal(b []byte) error {
func (m *GetStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetStatsResponse.Marshal(b, m, deterministic)
}
func (m *GetStatsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetStatsResponse.Merge(m, src)
func (dst *GetStatsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetStatsResponse.Merge(dst, src)
}
func (m *GetStatsResponse) XXX_Size() int {
return xxx_messageInfo_GetStatsResponse.Size(m)
@ -289,7 +290,7 @@ func (m *CreateStatsRequest) Reset() { *m = CreateStatsRequest{} }
func (m *CreateStatsRequest) String() string { return proto.CompactTextString(m) }
func (*CreateStatsRequest) ProtoMessage() {}
func (*CreateStatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{5}
return fileDescriptor_inspector_996d82db2c1f448a, []int{5}
}
func (m *CreateStatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateStatsRequest.Unmarshal(m, b)
@ -297,8 +298,8 @@ func (m *CreateStatsRequest) XXX_Unmarshal(b []byte) error {
func (m *CreateStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateStatsRequest.Marshal(b, m, deterministic)
}
func (m *CreateStatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateStatsRequest.Merge(m, src)
func (dst *CreateStatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateStatsRequest.Merge(dst, src)
}
func (m *CreateStatsRequest) XXX_Size() int {
return xxx_messageInfo_CreateStatsRequest.Size(m)
@ -347,7 +348,7 @@ func (m *CreateStatsResponse) Reset() { *m = CreateStatsResponse{} }
func (m *CreateStatsResponse) String() string { return proto.CompactTextString(m) }
func (*CreateStatsResponse) ProtoMessage() {}
func (*CreateStatsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{6}
return fileDescriptor_inspector_996d82db2c1f448a, []int{6}
}
func (m *CreateStatsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateStatsResponse.Unmarshal(m, b)
@ -355,8 +356,8 @@ func (m *CreateStatsResponse) XXX_Unmarshal(b []byte) error {
func (m *CreateStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateStatsResponse.Marshal(b, m, deterministic)
}
func (m *CreateStatsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateStatsResponse.Merge(m, src)
func (dst *CreateStatsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateStatsResponse.Merge(dst, src)
}
func (m *CreateStatsResponse) XXX_Size() int {
return xxx_messageInfo_CreateStatsResponse.Size(m)
@ -379,7 +380,7 @@ func (m *CountNodesResponse) Reset() { *m = CountNodesResponse{} }
func (m *CountNodesResponse) String() string { return proto.CompactTextString(m) }
func (*CountNodesResponse) ProtoMessage() {}
func (*CountNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{7}
return fileDescriptor_inspector_996d82db2c1f448a, []int{7}
}
func (m *CountNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesResponse.Unmarshal(m, b)
@ -387,8 +388,8 @@ func (m *CountNodesResponse) XXX_Unmarshal(b []byte) error {
func (m *CountNodesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CountNodesResponse.Marshal(b, m, deterministic)
}
func (m *CountNodesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountNodesResponse.Merge(m, src)
func (dst *CountNodesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountNodesResponse.Merge(dst, src)
}
func (m *CountNodesResponse) XXX_Size() int {
return xxx_messageInfo_CountNodesResponse.Size(m)
@ -416,7 +417,7 @@ func (m *CountNodesRequest) Reset() { *m = CountNodesRequest{} }
func (m *CountNodesRequest) String() string { return proto.CompactTextString(m) }
func (*CountNodesRequest) ProtoMessage() {}
func (*CountNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{8}
return fileDescriptor_inspector_996d82db2c1f448a, []int{8}
}
func (m *CountNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesRequest.Unmarshal(m, b)
@ -424,8 +425,8 @@ func (m *CountNodesRequest) XXX_Unmarshal(b []byte) error {
func (m *CountNodesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CountNodesRequest.Marshal(b, m, deterministic)
}
func (m *CountNodesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountNodesRequest.Merge(m, src)
func (dst *CountNodesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountNodesRequest.Merge(dst, src)
}
func (m *CountNodesRequest) XXX_Size() int {
return xxx_messageInfo_CountNodesRequest.Size(m)
@ -447,7 +448,7 @@ func (m *GetBucketsRequest) Reset() { *m = GetBucketsRequest{} }
func (m *GetBucketsRequest) String() string { return proto.CompactTextString(m) }
func (*GetBucketsRequest) ProtoMessage() {}
func (*GetBucketsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{9}
return fileDescriptor_inspector_996d82db2c1f448a, []int{9}
}
func (m *GetBucketsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketsRequest.Unmarshal(m, b)
@ -455,8 +456,8 @@ func (m *GetBucketsRequest) XXX_Unmarshal(b []byte) error {
func (m *GetBucketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetBucketsRequest.Marshal(b, m, deterministic)
}
func (m *GetBucketsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketsRequest.Merge(m, src)
func (dst *GetBucketsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketsRequest.Merge(dst, src)
}
func (m *GetBucketsRequest) XXX_Size() int {
return xxx_messageInfo_GetBucketsRequest.Size(m)
@ -479,7 +480,7 @@ func (m *GetBucketsResponse) Reset() { *m = GetBucketsResponse{} }
func (m *GetBucketsResponse) String() string { return proto.CompactTextString(m) }
func (*GetBucketsResponse) ProtoMessage() {}
func (*GetBucketsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{10}
return fileDescriptor_inspector_996d82db2c1f448a, []int{10}
}
func (m *GetBucketsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketsResponse.Unmarshal(m, b)
@ -487,8 +488,8 @@ func (m *GetBucketsResponse) XXX_Unmarshal(b []byte) error {
func (m *GetBucketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetBucketsResponse.Marshal(b, m, deterministic)
}
func (m *GetBucketsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketsResponse.Merge(m, src)
func (dst *GetBucketsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketsResponse.Merge(dst, src)
}
func (m *GetBucketsResponse) XXX_Size() int {
return xxx_messageInfo_GetBucketsResponse.Size(m)
@ -518,7 +519,7 @@ func (m *GetBucketRequest) Reset() { *m = GetBucketRequest{} }
func (m *GetBucketRequest) String() string { return proto.CompactTextString(m) }
func (*GetBucketRequest) ProtoMessage() {}
func (*GetBucketRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{11}
return fileDescriptor_inspector_996d82db2c1f448a, []int{11}
}
func (m *GetBucketRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketRequest.Unmarshal(m, b)
@ -526,8 +527,8 @@ func (m *GetBucketRequest) XXX_Unmarshal(b []byte) error {
func (m *GetBucketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetBucketRequest.Marshal(b, m, deterministic)
}
func (m *GetBucketRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketRequest.Merge(m, src)
func (dst *GetBucketRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketRequest.Merge(dst, src)
}
func (m *GetBucketRequest) XXX_Size() int {
return xxx_messageInfo_GetBucketRequest.Size(m)
@ -550,7 +551,7 @@ func (m *GetBucketResponse) Reset() { *m = GetBucketResponse{} }
func (m *GetBucketResponse) String() string { return proto.CompactTextString(m) }
func (*GetBucketResponse) ProtoMessage() {}
func (*GetBucketResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{12}
return fileDescriptor_inspector_996d82db2c1f448a, []int{12}
}
func (m *GetBucketResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetBucketResponse.Unmarshal(m, b)
@ -558,8 +559,8 @@ func (m *GetBucketResponse) XXX_Unmarshal(b []byte) error {
func (m *GetBucketResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetBucketResponse.Marshal(b, m, deterministic)
}
func (m *GetBucketResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketResponse.Merge(m, src)
func (dst *GetBucketResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetBucketResponse.Merge(dst, src)
}
func (m *GetBucketResponse) XXX_Size() int {
return xxx_messageInfo_GetBucketResponse.Size(m)
@ -588,7 +589,7 @@ func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{13}
return fileDescriptor_inspector_996d82db2c1f448a, []int{13}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
@ -596,8 +597,8 @@ func (m *Bucket) XXX_Unmarshal(b []byte) error {
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
}
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
func (dst *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(dst, src)
}
func (m *Bucket) XXX_Size() int {
return xxx_messageInfo_Bucket.Size(m)
@ -626,7 +627,7 @@ func (m *BucketList) Reset() { *m = BucketList{} }
func (m *BucketList) String() string { return proto.CompactTextString(m) }
func (*BucketList) ProtoMessage() {}
func (*BucketList) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{14}
return fileDescriptor_inspector_996d82db2c1f448a, []int{14}
}
func (m *BucketList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BucketList.Unmarshal(m, b)
@ -634,8 +635,8 @@ func (m *BucketList) XXX_Unmarshal(b []byte) error {
func (m *BucketList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BucketList.Marshal(b, m, deterministic)
}
func (m *BucketList) XXX_Merge(src proto.Message) {
xxx_messageInfo_BucketList.Merge(m, src)
func (dst *BucketList) XXX_Merge(src proto.Message) {
xxx_messageInfo_BucketList.Merge(dst, src)
}
func (m *BucketList) XXX_Size() int {
return xxx_messageInfo_BucketList.Size(m)
@ -666,7 +667,7 @@ func (m *PingNodeRequest) Reset() { *m = PingNodeRequest{} }
func (m *PingNodeRequest) String() string { return proto.CompactTextString(m) }
func (*PingNodeRequest) ProtoMessage() {}
func (*PingNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{15}
return fileDescriptor_inspector_996d82db2c1f448a, []int{15}
}
func (m *PingNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingNodeRequest.Unmarshal(m, b)
@ -674,8 +675,8 @@ func (m *PingNodeRequest) XXX_Unmarshal(b []byte) error {
func (m *PingNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PingNodeRequest.Marshal(b, m, deterministic)
}
func (m *PingNodeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PingNodeRequest.Merge(m, src)
func (dst *PingNodeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PingNodeRequest.Merge(dst, src)
}
func (m *PingNodeRequest) XXX_Size() int {
return xxx_messageInfo_PingNodeRequest.Size(m)
@ -704,7 +705,7 @@ func (m *PingNodeResponse) Reset() { *m = PingNodeResponse{} }
func (m *PingNodeResponse) String() string { return proto.CompactTextString(m) }
func (*PingNodeResponse) ProtoMessage() {}
func (*PingNodeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{16}
return fileDescriptor_inspector_996d82db2c1f448a, []int{16}
}
func (m *PingNodeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PingNodeResponse.Unmarshal(m, b)
@ -712,8 +713,8 @@ func (m *PingNodeResponse) XXX_Unmarshal(b []byte) error {
func (m *PingNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PingNodeResponse.Marshal(b, m, deterministic)
}
func (m *PingNodeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PingNodeResponse.Merge(m, src)
func (dst *PingNodeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PingNodeResponse.Merge(dst, src)
}
func (m *PingNodeResponse) XXX_Size() int {
return xxx_messageInfo_PingNodeResponse.Size(m)
@ -743,7 +744,7 @@ func (m *LookupNodeRequest) Reset() { *m = LookupNodeRequest{} }
func (m *LookupNodeRequest) String() string { return proto.CompactTextString(m) }
func (*LookupNodeRequest) ProtoMessage() {}
func (*LookupNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{17}
return fileDescriptor_inspector_996d82db2c1f448a, []int{17}
}
func (m *LookupNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupNodeRequest.Unmarshal(m, b)
@ -751,8 +752,8 @@ func (m *LookupNodeRequest) XXX_Unmarshal(b []byte) error {
func (m *LookupNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LookupNodeRequest.Marshal(b, m, deterministic)
}
func (m *LookupNodeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_LookupNodeRequest.Merge(m, src)
func (dst *LookupNodeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_LookupNodeRequest.Merge(dst, src)
}
func (m *LookupNodeRequest) XXX_Size() int {
return xxx_messageInfo_LookupNodeRequest.Size(m)
@ -789,7 +790,7 @@ func (m *LookupNodeResponse) Reset() { *m = LookupNodeResponse{} }
func (m *LookupNodeResponse) String() string { return proto.CompactTextString(m) }
func (*LookupNodeResponse) ProtoMessage() {}
func (*LookupNodeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{18}
return fileDescriptor_inspector_996d82db2c1f448a, []int{18}
}
func (m *LookupNodeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LookupNodeResponse.Unmarshal(m, b)
@ -797,8 +798,8 @@ func (m *LookupNodeResponse) XXX_Unmarshal(b []byte) error {
func (m *LookupNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LookupNodeResponse.Marshal(b, m, deterministic)
}
func (m *LookupNodeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_LookupNodeResponse.Merge(m, src)
func (dst *LookupNodeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_LookupNodeResponse.Merge(dst, src)
}
func (m *LookupNodeResponse) XXX_Size() int {
return xxx_messageInfo_LookupNodeResponse.Size(m)
@ -835,7 +836,7 @@ func (m *NodeInfoRequest) Reset() { *m = NodeInfoRequest{} }
func (m *NodeInfoRequest) String() string { return proto.CompactTextString(m) }
func (*NodeInfoRequest) ProtoMessage() {}
func (*NodeInfoRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{19}
return fileDescriptor_inspector_996d82db2c1f448a, []int{19}
}
func (m *NodeInfoRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeInfoRequest.Unmarshal(m, b)
@ -843,8 +844,8 @@ func (m *NodeInfoRequest) XXX_Unmarshal(b []byte) error {
func (m *NodeInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeInfoRequest.Marshal(b, m, deterministic)
}
func (m *NodeInfoRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeInfoRequest.Merge(m, src)
func (dst *NodeInfoRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeInfoRequest.Merge(dst, src)
}
func (m *NodeInfoRequest) XXX_Size() int {
return xxx_messageInfo_NodeInfoRequest.Size(m)
@ -875,7 +876,7 @@ func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} }
func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) }
func (*NodeInfoResponse) ProtoMessage() {}
func (*NodeInfoResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{20}
return fileDescriptor_inspector_996d82db2c1f448a, []int{20}
}
func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b)
@ -883,8 +884,8 @@ func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error {
func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic)
}
func (m *NodeInfoResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeInfoResponse.Merge(m, src)
func (dst *NodeInfoResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeInfoResponse.Merge(dst, src)
}
func (m *NodeInfoResponse) XXX_Size() int {
return xxx_messageInfo_NodeInfoResponse.Size(m)
@ -929,7 +930,7 @@ func (m *FindNearRequest) Reset() { *m = FindNearRequest{} }
func (m *FindNearRequest) String() string { return proto.CompactTextString(m) }
func (*FindNearRequest) ProtoMessage() {}
func (*FindNearRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{21}
return fileDescriptor_inspector_996d82db2c1f448a, []int{21}
}
func (m *FindNearRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindNearRequest.Unmarshal(m, b)
@ -937,8 +938,8 @@ func (m *FindNearRequest) XXX_Unmarshal(b []byte) error {
func (m *FindNearRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FindNearRequest.Marshal(b, m, deterministic)
}
func (m *FindNearRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_FindNearRequest.Merge(m, src)
func (dst *FindNearRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_FindNearRequest.Merge(dst, src)
}
func (m *FindNearRequest) XXX_Size() int {
return xxx_messageInfo_FindNearRequest.Size(m)
@ -967,7 +968,7 @@ func (m *FindNearResponse) Reset() { *m = FindNearResponse{} }
func (m *FindNearResponse) String() string { return proto.CompactTextString(m) }
func (*FindNearResponse) ProtoMessage() {}
func (*FindNearResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{22}
return fileDescriptor_inspector_996d82db2c1f448a, []int{22}
}
func (m *FindNearResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FindNearResponse.Unmarshal(m, b)
@ -975,8 +976,8 @@ func (m *FindNearResponse) XXX_Unmarshal(b []byte) error {
func (m *FindNearResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FindNearResponse.Marshal(b, m, deterministic)
}
func (m *FindNearResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_FindNearResponse.Merge(m, src)
func (dst *FindNearResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_FindNearResponse.Merge(dst, src)
}
func (m *FindNearResponse) XXX_Size() int {
return xxx_messageInfo_FindNearResponse.Size(m)
@ -1004,7 +1005,7 @@ func (m *DumpNodesRequest) Reset() { *m = DumpNodesRequest{} }
func (m *DumpNodesRequest) String() string { return proto.CompactTextString(m) }
func (*DumpNodesRequest) ProtoMessage() {}
func (*DumpNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{23}
return fileDescriptor_inspector_996d82db2c1f448a, []int{23}
}
func (m *DumpNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DumpNodesRequest.Unmarshal(m, b)
@ -1012,8 +1013,8 @@ func (m *DumpNodesRequest) XXX_Unmarshal(b []byte) error {
func (m *DumpNodesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DumpNodesRequest.Marshal(b, m, deterministic)
}
func (m *DumpNodesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DumpNodesRequest.Merge(m, src)
func (dst *DumpNodesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DumpNodesRequest.Merge(dst, src)
}
func (m *DumpNodesRequest) XXX_Size() int {
return xxx_messageInfo_DumpNodesRequest.Size(m)
@ -1035,7 +1036,7 @@ func (m *DumpNodesResponse) Reset() { *m = DumpNodesResponse{} }
func (m *DumpNodesResponse) String() string { return proto.CompactTextString(m) }
func (*DumpNodesResponse) ProtoMessage() {}
func (*DumpNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{24}
return fileDescriptor_inspector_996d82db2c1f448a, []int{24}
}
func (m *DumpNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DumpNodesResponse.Unmarshal(m, b)
@ -1043,8 +1044,8 @@ func (m *DumpNodesResponse) XXX_Unmarshal(b []byte) error {
func (m *DumpNodesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DumpNodesResponse.Marshal(b, m, deterministic)
}
func (m *DumpNodesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DumpNodesResponse.Merge(m, src)
func (dst *DumpNodesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DumpNodesResponse.Merge(dst, src)
}
func (m *DumpNodesResponse) XXX_Size() int {
return xxx_messageInfo_DumpNodesResponse.Size(m)
@ -1072,7 +1073,7 @@ func (m *StatsRequest) Reset() { *m = StatsRequest{} }
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
func (*StatsRequest) ProtoMessage() {}
func (*StatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{25}
return fileDescriptor_inspector_996d82db2c1f448a, []int{25}
}
func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatsRequest.Unmarshal(m, b)
@ -1080,8 +1081,8 @@ func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic)
}
func (m *StatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_StatsRequest.Merge(m, src)
func (dst *StatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_StatsRequest.Merge(dst, src)
}
func (m *StatsRequest) XXX_Size() int {
return xxx_messageInfo_StatsRequest.Size(m)
@ -1106,7 +1107,7 @@ func (m *StatSummaryResponse) Reset() { *m = StatSummaryResponse{} }
func (m *StatSummaryResponse) String() string { return proto.CompactTextString(m) }
func (*StatSummaryResponse) ProtoMessage() {}
func (*StatSummaryResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{26}
return fileDescriptor_inspector_996d82db2c1f448a, []int{26}
}
func (m *StatSummaryResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatSummaryResponse.Unmarshal(m, b)
@ -1114,8 +1115,8 @@ func (m *StatSummaryResponse) XXX_Unmarshal(b []byte) error {
func (m *StatSummaryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StatSummaryResponse.Marshal(b, m, deterministic)
}
func (m *StatSummaryResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_StatSummaryResponse.Merge(m, src)
func (dst *StatSummaryResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_StatSummaryResponse.Merge(dst, src)
}
func (m *StatSummaryResponse) XXX_Size() int {
return xxx_messageInfo_StatSummaryResponse.Size(m)
@ -1164,7 +1165,7 @@ func (m *DashboardRequest) Reset() { *m = DashboardRequest{} }
func (m *DashboardRequest) String() string { return proto.CompactTextString(m) }
func (*DashboardRequest) ProtoMessage() {}
func (*DashboardRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{27}
return fileDescriptor_inspector_996d82db2c1f448a, []int{27}
}
func (m *DashboardRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DashboardRequest.Unmarshal(m, b)
@ -1172,8 +1173,8 @@ func (m *DashboardRequest) XXX_Unmarshal(b []byte) error {
func (m *DashboardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DashboardRequest.Marshal(b, m, deterministic)
}
func (m *DashboardRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DashboardRequest.Merge(m, src)
func (dst *DashboardRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DashboardRequest.Merge(dst, src)
}
func (m *DashboardRequest) XXX_Size() int {
return xxx_messageInfo_DashboardRequest.Size(m)
@ -1185,7 +1186,7 @@ func (m *DashboardRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_DashboardRequest proto.InternalMessageInfo

type DashboardResponse struct {
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
NodeConnections int64 `protobuf:"varint,2,opt,name=node_connections,json=nodeConnections,proto3" json:"node_connections,omitempty"`
BootstrapAddress string `protobuf:"bytes,3,opt,name=bootstrap_address,json=bootstrapAddress,proto3" json:"bootstrap_address,omitempty"`
InternalAddress string `protobuf:"bytes,4,opt,name=internal_address,json=internalAddress,proto3" json:"internal_address,omitempty"`
@ -1202,7 +1203,7 @@ func (m *DashboardResponse) Reset() { *m = DashboardResponse{} }
func (m *DashboardResponse) String() string { return proto.CompactTextString(m) }
func (*DashboardResponse) ProtoMessage() {}
func (*DashboardResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{28}
return fileDescriptor_inspector_996d82db2c1f448a, []int{28}
}
func (m *DashboardResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DashboardResponse.Unmarshal(m, b)
@ -1210,8 +1211,8 @@ func (m *DashboardResponse) XXX_Unmarshal(b []byte) error {
func (m *DashboardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DashboardResponse.Marshal(b, m, deterministic)
}
func (m *DashboardResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DashboardResponse.Merge(m, src)
func (dst *DashboardResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DashboardResponse.Merge(dst, src)
}
func (m *DashboardResponse) XXX_Size() int {
return xxx_messageInfo_DashboardResponse.Size(m)
@ -1222,13 +1223,6 @@ func (m *DashboardResponse) XXX_DiscardUnknown() {

var xxx_messageInfo_DashboardResponse proto.InternalMessageInfo

func (m *DashboardResponse) GetNodeId() string {
if m != nil {
return m.NodeId
}
return ""
}

func (m *DashboardResponse) GetNodeConnections() int64 {
if m != nil {
return m.NodeConnections
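
The DashboardResponse hunks above change NodeId from a plain string to a gogoproto customtype=NodeID value field, which is why the generated GetNodeId accessor disappears. A hedged usage sketch (assuming the generated package imports as storj.io/storj/pkg/pb and that NodeID implements fmt.Stringer, as the customtype suggests):

package main

import (
	"fmt"

	"storj.io/storj/pkg/pb"
)

func main() {
	resp := &pb.DashboardResponse{}
	// NodeId is now a value with a custom type, so there is no generated
	// GetNodeId getter; read the field directly and format it yourself.
	fmt.Println(resp.NodeId.String())
}
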
@ -1279,9 +1273,9 @@ func (m *DashboardResponse) GetUptime() *duration.Duration {
}

func init() {
proto.RegisterType((*ListSegmentsRequest)(nil), "inspector.ListSegmentsRequest")
proto.RegisterType((*ListIrreparableSegmentsRequest)(nil), "inspector.ListIrreparableSegmentsRequest")
proto.RegisterType((*IrreparableSegment)(nil), "inspector.IrreparableSegment")
proto.RegisterType((*ListSegmentsResponse)(nil), "inspector.ListSegmentsResponse")
proto.RegisterType((*ListIrreparableSegmentsResponse)(nil), "inspector.ListIrreparableSegmentsResponse")
proto.RegisterType((*GetStatsRequest)(nil), "inspector.GetStatsRequest")
proto.RegisterType((*GetStatsResponse)(nil), "inspector.GetStatsResponse")
proto.RegisterType((*CreateStatsRequest)(nil), "inspector.CreateStatsRequest")
@ -1310,94 +1304,6 @@ func init() {
proto.RegisterType((*DashboardResponse)(nil), "inspector.DashboardResponse")
}

func init() { proto.RegisterFile("inspector.proto", fileDescriptor_a07d9034b2dd9d26) }

var fileDescriptor_a07d9034b2dd9d26 = []byte{
|
||||
// 1300 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0xae, 0xdb, 0x44,
|
||||
0x14, 0xae, 0xf3, 0xd7, 0xe4, 0x24, 0xcd, 0xcf, 0x24, 0xd0, 0x10, 0xda, 0x7b, 0x2f, 0x16, 0xd0,
|
||||
0x4b, 0x2b, 0xa5, 0x25, 0x94, 0x45, 0x91, 0x58, 0x34, 0xb9, 0x6a, 0x1b, 0xb5, 0xb4, 0xc5, 0x81,
|
||||
0x0d, 0x42, 0x44, 0x93, 0x78, 0x6e, 0x6a, 0xdd, 0xc4, 0x63, 0x3c, 0xe3, 0xc2, 0x7d, 0x11, 0x16,
|
||||
0x6c, 0x81, 0x47, 0x60, 0xc5, 0x0b, 0xf0, 0x0c, 0x2c, 0xba, 0xa9, 0xc4, 0x73, 0xa0, 0xf9, 0xb1,
|
||||
0xc7, 0x76, 0x12, 0xf5, 0x0a, 0xc1, 0xce, 0xf3, 0x7d, 0x9f, 0xbf, 0x39, 0xe7, 0xcc, 0xf8, 0xcc,
|
||||
0x18, 0x5a, 0x9e, 0xcf, 0x02, 0xb2, 0xe4, 0x34, 0x1c, 0x06, 0x21, 0xe5, 0x14, 0xd5, 0x12, 0x60,
|
||||
0x00, 0x2b, 0xba, 0xa2, 0x0a, 0x1e, 0x80, 0x4f, 0x5d, 0xa2, 0x9f, 0x5b, 0x01, 0xf5, 0x7c, 0x4e,
|
||||
0x42, 0x77, 0xa1, 0x81, 0x83, 0x15, 0xa5, 0xab, 0x35, 0xb9, 0x2d, 0x47, 0x8b, 0xe8, 0xf4, 0xb6,
|
||||
0x1b, 0x85, 0x98, 0x7b, 0xd4, 0x57, 0xbc, 0x3d, 0x81, 0xee, 0x13, 0x8f, 0xf1, 0x19, 0x59, 0x6d,
|
||||
0x88, 0xcf, 0x99, 0x43, 0xbe, 0x8f, 0x08, 0xe3, 0xa8, 0x07, 0xe5, 0xb5, 0xb7, 0xf1, 0x78, 0xdf,
|
||||
0x3a, 0xb2, 0x8e, 0xcb, 0x8e, 0x1a, 0xa0, 0xb7, 0xa1, 0x42, 0x4f, 0x4f, 0x19, 0xe1, 0xfd, 0x82,
|
||||
0x84, 0xf5, 0xc8, 0xfe, 0xdb, 0x02, 0x34, 0x0d, 0x43, 0x12, 0xe0, 0x10, 0x2f, 0xd6, 0x44, 0x9b,
|
||||
0x21, 0x04, 0xa5, 0x00, 0xf3, 0x17, 0xd2, 0xa3, 0xe1, 0xc8, 0x67, 0x74, 0x0f, 0x9a, 0x4c, 0xd1,
|
||||
0x73, 0x97, 0x70, 0xec, 0xad, 0xa5, 0x55, 0x7d, 0x84, 0x86, 0x26, 0xf2, 0xe7, 0xea, 0xc9, 0xb9,
|
||||
0xa2, 0x95, 0x27, 0x52, 0x88, 0x0e, 0xa1, 0xbe, 0xa6, 0x8c, 0xcf, 0x03, 0x8f, 0x2c, 0x09, 0xeb,
|
||||
0x17, 0x65, 0x08, 0x20, 0xa0, 0xe7, 0x12, 0x41, 0x43, 0xe8, 0xae, 0x31, 0xe3, 0x73, 0x11, 0x88,
|
||||
0x17, 0xce, 0x31, 0xe7, 0x64, 0x13, 0xf0, 0x7e, 0xe9, 0xc8, 0x3a, 0x2e, 0x3a, 0x1d, 0x41, 0x39,
|
||||
0x92, 0xb9, 0xaf, 0x08, 0x74, 0x07, 0x7a, 0x59, 0xe9, 0x7c, 0x49, 0x23, 0x9f, 0xf7, 0xcb, 0xf2,
|
||||
0x05, 0x14, 0xa6, 0xc5, 0x13, 0xc1, 0xd8, 0x5f, 0x42, 0x2f, 0x5b, 0x2d, 0x16, 0x50, 0x9f, 0x11,
|
||||
0x74, 0x0f, 0xaa, 0x3a, 0x56, 0xd6, 0xb7, 0x8e, 0x8a, 0xc7, 0xf5, 0xd1, 0xf5, 0xa1, 0x59, 0xbd,
|
||||
0xed, 0xd2, 0x38, 0x89, 0xdc, 0xfe, 0x0c, 0x5a, 0x0f, 0x09, 0x9f, 0x71, 0x6c, 0x8a, 0x7f, 0x03,
|
||||
0x2e, 0x8b, 0x25, 0x9d, 0x7b, 0xae, 0x2a, 0xdd, 0xb8, 0xf9, 0xe7, 0xab, 0xc3, 0x4b, 0x7f, 0xbd,
|
||||
0x3a, 0xac, 0x3c, 0xa5, 0x2e, 0x99, 0x9e, 0x38, 0x15, 0x41, 0x4f, 0x5d, 0xfb, 0x67, 0x0b, 0xda,
|
||||
0xe6, 0x65, 0x1d, 0xcb, 0x21, 0xd4, 0x71, 0xe4, 0x7a, 0x71, 0x32, 0x96, 0x4c, 0x06, 0x24, 0x24,
|
||||
0x93, 0x30, 0x02, 0xb9, 0x11, 0x64, 0xfd, 0x2d, 0x2d, 0x70, 0x04, 0x82, 0xde, 0x83, 0x46, 0x14,
|
||||
0x70, 0x6f, 0x43, 0xb4, 0x45, 0x51, 0x5a, 0xd4, 0x15, 0xa6, 0x3c, 0x8c, 0x44, 0x99, 0x94, 0xa4,
|
||||
0x89, 0x96, 0x48, 0x17, 0xfb, 0xb5, 0x05, 0x68, 0x12, 0x12, 0xcc, 0xc9, 0xbf, 0x4a, 0x2e, 0x9f,
|
||||
0x47, 0x61, 0x2b, 0x8f, 0x21, 0x74, 0x95, 0x80, 0x45, 0xcb, 0x25, 0x61, 0x2c, 0x13, 0x6d, 0x47,
|
||||
0x52, 0x33, 0xc5, 0xe4, 0x63, 0x56, 0xc2, 0xd2, 0x76, 0x5a, 0x77, 0xa0, 0xa7, 0x25, 0x59, 0x4f,
|
||||
0xbd, 0x23, 0x14, 0x97, 0x36, 0xb5, 0xdf, 0x82, 0x6e, 0x26, 0x49, 0xb5, 0x08, 0xf6, 0x4d, 0x40,
|
||||
0x92, 0x17, 0x39, 0x99, 0xa5, 0xe9, 0x41, 0x39, 0xbd, 0x28, 0x6a, 0x60, 0x77, 0xa1, 0x93, 0xd6,
|
||||
0xca, 0x32, 0x09, 0xf0, 0x21, 0xe1, 0xe3, 0x68, 0x79, 0x46, 0x92, 0xda, 0xd9, 0x8f, 0x00, 0xa5,
|
||||
0x41, 0xe3, 0xca, 0x29, 0xc7, 0xeb, 0xd8, 0x55, 0x0e, 0xd0, 0x35, 0x28, 0x7a, 0x2e, 0xeb, 0x17,
|
||||
0x8e, 0x8a, 0xc7, 0x8d, 0x31, 0xa4, 0xea, 0x2b, 0x60, 0x7b, 0x24, 0x37, 0x8e, 0x72, 0x8a, 0x57,
|
||||
0xe6, 0x00, 0x0a, 0x7b, 0x17, 0xa5, 0xe0, 0xb9, 0xf6, 0xd7, 0xa9, 0x90, 0x92, 0xc9, 0xdf, 0xf0,
|
||||
0x12, 0x3a, 0x82, 0xb2, 0x58, 0x4f, 0x15, 0x48, 0x7d, 0x04, 0x43, 0xd9, 0xac, 0x84, 0xc0, 0x51,
|
||||
0x84, 0x7d, 0x13, 0x2a, 0xca, 0xf3, 0x02, 0xda, 0x21, 0x80, 0xd2, 0x8a, 0xaf, 0xd0, 0xe8, 0xad,
|
||||
0x7d, 0xfa, 0xc7, 0xd0, 0x7a, 0xee, 0xf9, 0x2b, 0x09, 0x5d, 0x2c, 0x4b, 0xd4, 0x87, 0xcb, 0xd8,
|
||||
0x75, 0x43, 0xc2, 0x98, 0xdc, 0x72, 0x35, 0x27, 0x1e, 0xda, 0x36, 0xb4, 0x8d, 0x99, 0x4e, 0xbf,
|
||||
0x09, 0x05, 0x7a, 0x26, 0xdd, 0xaa, 0x4e, 0x81, 0x9e, 0xd9, 0x9f, 0x43, 0xe7, 0x09, 0xa5, 0x67,
|
||||
0x51, 0x90, 0x9e, 0xb2, 0x99, 0x4c, 0x59, 0x7b, 0xc3, 0x14, 0xdf, 0x02, 0x4a, 0xbf, 0x9e, 0xd4,
|
||||
0xb8, 0x24, 0xd2, 0x91, 0x0e, 0xd9, 0x34, 0x25, 0x8e, 0x3e, 0x84, 0xd2, 0x86, 0x70, 0x9c, 0x74,
|
||||
0xd2, 0x84, 0xff, 0x82, 0x70, 0xec, 0x62, 0x8e, 0x1d, 0xc9, 0xdb, 0xdf, 0x41, 0x4b, 0x26, 0xea,
|
||||
0x9f, 0xd2, 0x8b, 0x56, 0xe3, 0x56, 0x36, 0xd4, 0xfa, 0xa8, 0x63, 0xdc, 0xef, 0x2b, 0xc2, 0x44,
|
||||
	0xff, 0x93, 0x05, 0x6d, 0x33, 0x81, 0x0e, 0xde, 0x86, 0x12, 0x3f, 0x0f, 0x54, 0xf0, 0xcd, 0x51,
	0xd3, 0xbc, 0xfe, 0xd5, 0x79, 0x40, 0x1c, 0xc9, 0xa1, 0x21, 0x54, 0x69, 0x40, 0x42, 0xcc, 0x69,
	0xb8, 0x9d, 0xc4, 0x33, 0xcd, 0x38, 0x89, 0x46, 0xe8, 0x97, 0x38, 0xc0, 0x4b, 0x8f, 0x9f, 0xcb,
	0xcf, 0x3d, 0xa3, 0x9f, 0x68, 0xc6, 0x49, 0x34, 0xf6, 0x06, 0x5a, 0x0f, 0x3c, 0xdf, 0x7d, 0x4a,
	0x70, 0x78, 0xd1, 0xc4, 0xdf, 0x87, 0x32, 0xe3, 0x38, 0x54, 0x7d, 0x67, 0x5b, 0xa2, 0x48, 0x73,
	0x4c, 0xaa, 0xa6, 0xa3, 0x06, 0xf6, 0x5d, 0x68, 0x9b, 0xe9, 0x74, 0x19, 0xde, 0xbc, 0xb7, 0x11,
	0xb4, 0x4f, 0xa2, 0x4d, 0x90, 0xe9, 0x02, 0x9f, 0x42, 0x27, 0x85, 0xe5, 0xad, 0xf6, 0x6e, 0xfb,
	0x26, 0x34, 0xd2, 0x3d, 0xd7, 0xfe, 0xdd, 0x82, 0xae, 0x00, 0x66, 0xd1, 0x66, 0x83, 0xc3, 0xf3,
	0xc4, 0xe9, 0x3a, 0x40, 0xc4, 0x88, 0x3b, 0x67, 0x01, 0x5e, 0x12, 0xdd, 0x3e, 0x6a, 0x02, 0x99,
	0x09, 0x00, 0xdd, 0x80, 0x16, 0x7e, 0x89, 0xbd, 0xb5, 0x38, 0xb8, 0xb4, 0x46, 0x75, 0xe1, 0x66,
	0x02, 0x2b, 0xe1, 0x07, 0xd0, 0x94, 0x3e, 0x0b, 0xec, 0xbb, 0x3f, 0x78, 0x2e, 0x7f, 0xa1, 0xeb,
	0x71, 0x45, 0xa0, 0xe3, 0x18, 0x44, 0xb7, 0xa1, 0x6b, 0xfc, 0x8c, 0x56, 0xf5, 0x61, 0x94, 0x50,
	0xc9, 0x0b, 0xb2, 0x24, 0x98, 0xbd, 0x58, 0x50, 0x1c, 0xba, 0x71, 0x2e, 0xaf, 0x0b, 0xd0, 0x49,
	0x81, 0x3a, 0x93, 0xab, 0xd9, 0x53, 0xa5, 0x96, 0x9c, 0x22, 0x1f, 0x41, 0x5b, 0x12, 0x4b, 0xea,
	0xfb, 0x64, 0x29, 0x2e, 0x3e, 0x4c, 0x27, 0xd1, 0x12, 0xf8, 0xc4, 0xc0, 0xe8, 0x16, 0x74, 0x16,
	0x94, 0x72, 0xc6, 0x43, 0x1c, 0xcc, 0xe3, 0x5d, 0x5f, 0x94, 0x6e, 0xed, 0x84, 0xd0, 0x9b, 0x5e,
	0xf8, 0xca, 0x4b, 0x8a, 0x8f, 0xd7, 0x89, 0xb6, 0x24, 0xb5, 0xad, 0x18, 0x4f, 0x49, 0xc9, 0x8f,
	0x39, 0x69, 0x59, 0x49, 0x63, 0x3c, 0x96, 0xde, 0x95, 0xbb, 0x8e, 0xb3, 0x7e, 0x45, 0xee, 0xea,
	0x83, 0xd4, 0x25, 0x62, 0xc7, 0xfa, 0x39, 0x4a, 0x8c, 0x0e, 0x00, 0x4c, 0x7a, 0xfd, 0xcb, 0xb2,
	0x19, 0xa5, 0x10, 0xf4, 0x31, 0x54, 0xd4, 0xc9, 0xd5, 0xaf, 0x4a, 0xdb, 0x77, 0x86, 0xea, 0x52,
	0x38, 0x8c, 0x2f, 0x85, 0xc3, 0x13, 0x7d, 0x29, 0x74, 0xb4, 0x70, 0xf4, 0x47, 0x11, 0x1a, 0x8f,
	0xb1, 0x3b, 0x8d, 0xa7, 0x47, 0x53, 0x00, 0x73, 0x48, 0xa1, 0x6b, 0xa9, 0xc0, 0xb6, 0xce, 0xae,
	0xc1, 0xf5, 0x3d, 0xac, 0x5e, 0xab, 0x09, 0x54, 0xe3, 0x3e, 0x8a, 0x06, 0x29, 0x69, 0xae, 0x53,
	0x0f, 0xde, 0xdd, 0xc9, 0x69, 0x93, 0x29, 0x80, 0xe9, 0x94, 0x99, 0x78, 0xb6, 0xfa, 0x6f, 0x26,
	0x9e, 0x1d, 0xed, 0x75, 0x02, 0xd5, 0xb8, 0x6b, 0x65, 0xe2, 0xc9, 0xf5, 0xca, 0x4c, 0x3c, 0x5b,
	0x6d, 0x6e, 0x02, 0xd5, 0xf8, 0x9b, 0xcf, 0x98, 0xe4, 0xfa, 0x4e, 0xc6, 0x64, 0xab, 0x49, 0x3c,
	0x80, 0x5a, 0xf2, 0xb9, 0xa3, 0xb4, 0x32, 0xdf, 0x18, 0x06, 0xd7, 0x76, 0x93, 0xca, 0x67, 0xf4,
	0x9b, 0x05, 0xed, 0x67, 0x2f, 0x49, 0xb8, 0xc6, 0xe7, 0xff, 0xcb, 0x0a, 0xfe, 0x57, 0x71, 0xfe,
	0x6a, 0x41, 0x4b, 0xec, 0xeb, 0x93, 0xb1, 0x09, 0x73, 0x02, 0xd5, 0xf8, 0x4a, 0x9b, 0x29, 0x64,
	0xee, 0x92, 0x9c, 0x29, 0xe4, 0xd6, 0x1d, 0xf8, 0x09, 0xd4, 0x53, 0xb7, 0x32, 0x94, 0x49, 0x67,
	0xeb, 0x4a, 0x3a, 0x38, 0xd8, 0x47, 0xeb, 0x30, 0x7f, 0xb1, 0xa0, 0x2b, 0x7f, 0x31, 0x66, 0x9c,
	0x86, 0xc4, 0x84, 0x3a, 0x86, 0xb2, 0xf2, 0xbf, 0x9a, 0xfb, 0x4e, 0x77, 0x3a, 0xef, 0xf8, 0x80,
	0xed, 0x4b, 0xe8, 0x11, 0xd4, 0x92, 0x6e, 0x96, 0x2d, 0x65, 0xae, 0xf1, 0x65, 0x4b, 0x99, 0x6f,
	0x80, 0xf6, 0xa5, 0xd1, 0x0a, 0x7a, 0xa9, 0x1f, 0x0d, 0x13, 0xe5, 0x33, 0x68, 0xa4, 0xff, 0x59,
	0x50, 0x3a, 0xa6, 0x1d, 0xbf, 0x7e, 0x83, 0xc3, 0xbd, 0xbc, 0x9a, 0x6a, 0x5c, 0xfa, 0xa6, 0x10,
	0x2c, 0x16, 0x15, 0xd9, 0x3c, 0x3e, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0xea, 0x2e, 0x78, 0x27,
	0xa6, 0x0e, 0x00, 0x00,
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

@ -1954,8 +1860,8 @@ var _PieceStoreInspector_serviceDesc = grpc.ServiceDesc{
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type IrreparableInspectorClient interface {
	// ListSegments returns damaged segments
	ListSegments(ctx context.Context, in *ListSegmentsRequest, opts ...grpc.CallOption) (*ListSegmentsResponse, error)
	// ListIrreparableSegments returns damaged segments
	ListIrreparableSegments(ctx context.Context, in *ListIrreparableSegmentsRequest, opts ...grpc.CallOption) (*ListIrreparableSegmentsResponse, error)
}

type irreparableInspectorClient struct {
@ -1966,9 +1872,9 @@ func NewIrreparableInspectorClient(cc *grpc.ClientConn) IrreparableInspectorClie
	return &irreparableInspectorClient{cc}
}

func (c *irreparableInspectorClient) ListSegments(ctx context.Context, in *ListSegmentsRequest, opts ...grpc.CallOption) (*ListSegmentsResponse, error) {
	out := new(ListSegmentsResponse)
	err := c.cc.Invoke(ctx, "/inspector.IrreparableInspector/ListSegments", in, out, opts...)
func (c *irreparableInspectorClient) ListIrreparableSegments(ctx context.Context, in *ListIrreparableSegmentsRequest, opts ...grpc.CallOption) (*ListIrreparableSegmentsResponse, error) {
	out := new(ListIrreparableSegmentsResponse)
	err := c.cc.Invoke(ctx, "/inspector.IrreparableInspector/ListIrreparableSegments", in, out, opts...)
	if err != nil {
		return nil, err
	}
@ -1977,28 +1883,28 @@ func (c *irreparableInspectorClient) ListSegments(ctx context.Context, in *ListS

// IrreparableInspectorServer is the server API for IrreparableInspector service.
type IrreparableInspectorServer interface {
	// ListSegments returns damaged segments
	ListSegments(context.Context, *ListSegmentsRequest) (*ListSegmentsResponse, error)
	// ListIrreparableSegments returns damaged segments
	ListIrreparableSegments(context.Context, *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error)
}

func RegisterIrreparableInspectorServer(s *grpc.Server, srv IrreparableInspectorServer) {
	s.RegisterService(&_IrreparableInspector_serviceDesc, srv)
}

func _IrreparableInspector_ListSegments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListSegmentsRequest)
func _IrreparableInspector_ListIrreparableSegments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListIrreparableSegmentsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IrreparableInspectorServer).ListSegments(ctx, in)
		return srv.(IrreparableInspectorServer).ListIrreparableSegments(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/inspector.IrreparableInspector/ListSegments",
		FullMethod: "/inspector.IrreparableInspector/ListIrreparableSegments",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IrreparableInspectorServer).ListSegments(ctx, req.(*ListSegmentsRequest))
		return srv.(IrreparableInspectorServer).ListIrreparableSegments(ctx, req.(*ListIrreparableSegmentsRequest))
	}
	return interceptor(ctx, in, info, handler)
}
@ -2008,10 +1914,98 @@ var _IrreparableInspector_serviceDesc = grpc.ServiceDesc{
	HandlerType: (*IrreparableInspectorServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "ListSegments",
			Handler: _IrreparableInspector_ListSegments_Handler,
			MethodName: "ListIrreparableSegments",
			Handler: _IrreparableInspector_ListIrreparableSegments_Handler,
		},
	},
	Streams: []grpc.StreamDesc{},
	Metadata: "inspector.proto",
}

func init() { proto.RegisterFile("inspector.proto", fileDescriptor_inspector_996d82db2c1f448a) }

var fileDescriptor_inspector_996d82db2c1f448a = []byte{
	// 1307 bytes of a gzipped FileDescriptorProto
	// (raw gzipped descriptor bytes elided)
}
@ -49,12 +49,12 @@ service PieceStoreInspector {
}

service IrreparableInspector {
	// ListSegments returns damaged segments
	rpc ListSegments(ListSegmentsRequest) returns (ListSegmentsResponse);
	// ListIrreparableSegments returns damaged segments
	rpc ListIrreparableSegments(ListIrreparableSegmentsRequest) returns (ListIrreparableSegmentsResponse);
}

// ListSegments
message ListSegmentsRequest {
message ListIrreparableSegmentsRequest {
	int32 limit = 1;
	int32 offset = 2;
}
@ -67,7 +67,7 @@ message IrreparableSegment {
	int64 repair_attempt_count = 5;
}

message ListSegmentsResponse {
message ListIrreparableSegmentsResponse {
	repeated IrreparableSegment segments = 1;
}

@ -189,7 +189,7 @@ message DashboardRequest {
}

message DashboardResponse {
	string node_id = 1;
	bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
	int64 node_connections = 2;
	string bootstrap_address = 3;
	string internal_address = 4;
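
The rename from ListSegments to ListIrreparableSegments runs through the proto definition, the regenerated stubs above, and every caller. The sketch below shows what a satellite-side implementation of the renamed RPC could look like; it is an editor's illustration, not code from this commit, and the irrdb store with its GetLimited helper is an assumption:

	package inspector

	import (
		"context"

		"storj.io/storj/pkg/pb"
	)

	// irreparableDB is a hypothetical view of the satellite's irreparable-segment store.
	type irreparableDB interface {
		GetLimited(ctx context.Context, limit int, offset int64) ([]*pb.IrreparableSegment, error)
	}

	// Endpoint sketches a server for pb.IrreparableInspectorServer.
	type Endpoint struct {
		irrdb irreparableDB
	}

	// ListIrreparableSegments returns one page of damaged segments, using the
	// limit/offset pagination from ListIrreparableSegmentsRequest.
	func (e *Endpoint) ListIrreparableSegments(ctx context.Context, req *pb.ListIrreparableSegmentsRequest) (*pb.ListIrreparableSegmentsResponse, error) {
		segments, err := e.irrdb.GetLimited(ctx, int(req.Limit), int64(req.Offset))
		if err != nil {
			return nil, err
		}
		return &pb.ListIrreparableSegmentsResponse{Segments: segments}, nil
	}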
1081 pkg/pb/metainfo.pb.go Normal file
File diff suppressed because it is too large

106 pkg/pb/metainfo.proto Normal file
@ -0,0 +1,106 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

syntax = "proto3";
option go_package = "pb";

package metainfo;

import "gogo.proto";
import "google/protobuf/timestamp.proto";
import "node.proto";
import "pointerdb.proto";
import "orders.proto";

// Metainfo is a satellite RPC service
service Metainfo {
	rpc CreateSegment(SegmentWriteRequest) returns (SegmentWriteResponse);
	rpc CommitSegment(SegmentCommitRequest) returns (SegmentCommitResponse);
	rpc SegmentInfo(SegmentInfoRequest) returns (SegmentInfoResponse);
	rpc DownloadSegment(SegmentDownloadRequest) returns (SegmentDownloadResponse);
	rpc DeleteSegment(SegmentDeleteRequest) returns (SegmentDeleteResponse);
	rpc ListSegments(ListSegmentsRequest) returns (ListSegmentsResponse);
}

message AddressedOrderLimit {
	orders.OrderLimit2 limit = 1;
	node.NodeAddress storage_node_address = 2;
}

message SegmentWriteRequest {
	bytes bucket = 1;
	bytes path = 2;
	int64 segment = 3;
	pointerdb.RedundancyScheme redundancy = 4;
	int64 max_encrypted_segment_size = 5;
	google.protobuf.Timestamp expiration = 6;
}

message SegmentWriteResponse {
	repeated AddressedOrderLimit addressed_limits = 1;
	bytes root_piece_id = 2 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
}

message SegmentCommitRequest {
	bytes bucket = 1;
	bytes path = 2;
	int64 segment = 3;
	pointerdb.Pointer pointer = 4;
	repeated orders.OrderLimit2 original_limits = 5;
}

message SegmentCommitResponse {
	pointerdb.Pointer pointer = 1;
}

message SegmentDownloadRequest {
	bytes bucket = 1;
	bytes path = 2;
	int64 segment = 3;
}

message SegmentDownloadResponse {
	repeated AddressedOrderLimit addressed_limits = 1;
	pointerdb.Pointer pointer = 2;
}

message SegmentInfoRequest {
	bytes bucket = 1;
	bytes path = 2;
	int64 segment = 3;
}

message SegmentInfoResponse {
	pointerdb.Pointer pointer = 2;
}

message SegmentDeleteRequest {
	bytes bucket = 1;
	bytes path = 2;
	int64 segment = 3;
}

message SegmentDeleteResponse {
	repeated AddressedOrderLimit addressed_limits = 1;
}

message ListSegmentsRequest {
	bytes bucket = 1;
	bytes prefix = 2;
	bytes start_after = 3;
	bytes end_before = 4;
	bool recursive = 5;
	int32 limit = 6;
	fixed32 meta_flags = 7;
}

message ListSegmentsResponse {
	message Item {
		bytes path = 1;
		pointerdb.Pointer pointer = 2;
		bool is_prefix = 3;
	}

	repeated Item items = 1;
	bool more = 2;
}
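
The Metainfo service makes segment writes an explicit two-phase exchange: CreateSegment returns addressed order limits, the uplink stores pieces on the nodes those limits name, and CommitSegment records the resulting pointer. Here is a rough client-side sketch of that flow; this is an editor's illustration under assumptions (a connected pb.MetainfoClient, piece uploads elided), not code from this commit:

	package example

	import (
		"context"

		"storj.io/storj/pkg/pb"
	)

	// writeSegment illustrates the two-phase write against the Metainfo service.
	func writeSegment(ctx context.Context, client pb.MetainfoClient, bucket, path []byte, rs *pb.RedundancyScheme) error {
		created, err := client.CreateSegment(ctx, &pb.SegmentWriteRequest{
			Bucket:                  bucket,
			Path:                    path,
			Segment:                 0,
			Redundancy:              rs,
			MaxEncryptedSegmentSize: 64 << 20, // assumed 64 MiB cap
		})
		if err != nil {
			return err
		}

		// each addressed limit pairs an OrderLimit2 with the storage node to dial
		for _, addressed := range created.AddressedLimits {
			_ = addressed.StorageNodeAddress // the piece upload itself is elided here
		}

		// after the pieces are stored, commit the pointer for the segment
		_, err = client.CommitSegment(ctx, &pb.SegmentCommitRequest{
			Bucket:  bucket,
			Path:    path,
			Segment: 0,
			// Pointer and OriginalLimits would be filled from the upload results.
		})
		return err
	}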
301 pkg/pb/orders.pb.go Normal file
@ -0,0 +1,301 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: orders.proto

package pb

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

// PieceAction is an enumeration of all possible actions executed on a storage node
type PieceAction int32

const (
	PieceAction_INVALID PieceAction = 0
	PieceAction_PUT PieceAction = 1
	PieceAction_GET PieceAction = 2
	PieceAction_GET_AUDIT PieceAction = 3
	PieceAction_GET_REPAIR PieceAction = 4
	PieceAction_PUT_REPAIR PieceAction = 5
	PieceAction_DELETE PieceAction = 6
)

var PieceAction_name = map[int32]string{
	0: "INVALID",
	1: "PUT",
	2: "GET",
	3: "GET_AUDIT",
	4: "GET_REPAIR",
	5: "PUT_REPAIR",
	6: "DELETE",
}
var PieceAction_value = map[string]int32{
	"INVALID": 0,
	"PUT": 1,
	"GET": 2,
	"GET_AUDIT": 3,
	"GET_REPAIR": 4,
	"PUT_REPAIR": 5,
	"DELETE": 6,
}

func (x PieceAction) String() string {
	return proto.EnumName(PieceAction_name, int32(x))
}
func (PieceAction) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_orders_ba7c18f81666afaa, []int{0}
}

// OrderLimit2 is provided by the satellite to allow a specific action on a storage node within some limits
type OrderLimit2 struct {
	// unique serial to avoid replay attacks
	SerialNumber []byte `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
	// satellite that issued this order limit, allowing the orderer to do the specified action
	SatelliteId NodeID `protobuf:"bytes,2,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"`
	// uplink that requested the order limit, or on whose behalf the action is done
	UplinkId NodeID `protobuf:"bytes,3,opt,name=uplink_id,json=uplinkId,proto3,customtype=NodeID" json:"uplink_id"`
	// storage node that can claim the order limit specified by serial
	StorageNodeId NodeID `protobuf:"bytes,4,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"`
	// piece which is allowed to be touched
	PieceId PieceID `protobuf:"bytes,5,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"`
	// limit in bytes of how much can be changed
	Limit int64 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"`
	Action PieceAction `protobuf:"varint,7,opt,name=action,proto3,enum=orders.PieceAction" json:"action,omitempty"`
	PieceExpiration *timestamp.Timestamp `protobuf:"bytes,8,opt,name=piece_expiration,json=pieceExpiration,proto3" json:"piece_expiration,omitempty"`
	OrderExpiration *timestamp.Timestamp `protobuf:"bytes,9,opt,name=order_expiration,json=orderExpiration,proto3" json:"order_expiration,omitempty"`
	SatelliteSignature []byte `protobuf:"bytes,10,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *OrderLimit2) Reset() { *m = OrderLimit2{} }
func (m *OrderLimit2) String() string { return proto.CompactTextString(m) }
func (*OrderLimit2) ProtoMessage() {}
func (*OrderLimit2) Descriptor() ([]byte, []int) {
	return fileDescriptor_orders_ba7c18f81666afaa, []int{0}
}
func (m *OrderLimit2) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_OrderLimit2.Unmarshal(m, b)
}
func (m *OrderLimit2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_OrderLimit2.Marshal(b, m, deterministic)
}
func (dst *OrderLimit2) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OrderLimit2.Merge(dst, src)
}
func (m *OrderLimit2) XXX_Size() int {
	return xxx_messageInfo_OrderLimit2.Size(m)
}
func (m *OrderLimit2) XXX_DiscardUnknown() {
	xxx_messageInfo_OrderLimit2.DiscardUnknown(m)
}

var xxx_messageInfo_OrderLimit2 proto.InternalMessageInfo

func (m *OrderLimit2) GetSerialNumber() []byte {
	if m != nil {
		return m.SerialNumber
	}
	return nil
}

func (m *OrderLimit2) GetLimit() int64 {
	if m != nil {
		return m.Limit
	}
	return 0
}

func (m *OrderLimit2) GetAction() PieceAction {
	if m != nil {
		return m.Action
	}
	return PieceAction_INVALID
}

func (m *OrderLimit2) GetPieceExpiration() *timestamp.Timestamp {
	if m != nil {
		return m.PieceExpiration
	}
	return nil
}

func (m *OrderLimit2) GetOrderExpiration() *timestamp.Timestamp {
	if m != nil {
		return m.OrderExpiration
	}
	return nil
}

func (m *OrderLimit2) GetSatelliteSignature() []byte {
	if m != nil {
		return m.SatelliteSignature
	}
	return nil
}

// Order2 is one step of fulfilling Amount number of bytes from an OrderLimit2 with SerialNumber
type Order2 struct {
	// serial of the order limit that was signed
	SerialNumber []byte `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
	// amount to be signed for
	Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
	// signature
	UplinkSignature []byte `protobuf:"bytes,3,opt,name=uplink_signature,json=uplinkSignature,proto3" json:"uplink_signature,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *Order2) Reset() { *m = Order2{} }
func (m *Order2) String() string { return proto.CompactTextString(m) }
func (*Order2) ProtoMessage() {}
func (*Order2) Descriptor() ([]byte, []int) {
	return fileDescriptor_orders_ba7c18f81666afaa, []int{1}
}
func (m *Order2) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Order2.Unmarshal(m, b)
}
func (m *Order2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Order2.Marshal(b, m, deterministic)
}
func (dst *Order2) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Order2.Merge(dst, src)
}
func (m *Order2) XXX_Size() int {
	return xxx_messageInfo_Order2.Size(m)
}
func (m *Order2) XXX_DiscardUnknown() {
	xxx_messageInfo_Order2.DiscardUnknown(m)
}

var xxx_messageInfo_Order2 proto.InternalMessageInfo

func (m *Order2) GetSerialNumber() []byte {
	if m != nil {
		return m.SerialNumber
	}
	return nil
}

func (m *Order2) GetAmount() int64 {
	if m != nil {
		return m.Amount
	}
	return 0
}

func (m *Order2) GetUplinkSignature() []byte {
	if m != nil {
		return m.UplinkSignature
	}
	return nil
}

type PieceHash struct {
	// piece id
	PieceId PieceID `protobuf:"bytes,1,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"`
	// hash of the piece that was/is uploaded
	Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
	// signature from either the satellite or the storage node
	Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceHash) Reset() { *m = PieceHash{} }
func (m *PieceHash) String() string { return proto.CompactTextString(m) }
func (*PieceHash) ProtoMessage() {}
func (*PieceHash) Descriptor() ([]byte, []int) {
	return fileDescriptor_orders_ba7c18f81666afaa, []int{2}
}
func (m *PieceHash) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceHash.Unmarshal(m, b)
}
func (m *PieceHash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceHash.Marshal(b, m, deterministic)
}
func (dst *PieceHash) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceHash.Merge(dst, src)
}
func (m *PieceHash) XXX_Size() int {
	return xxx_messageInfo_PieceHash.Size(m)
}
func (m *PieceHash) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceHash.DiscardUnknown(m)
}

var xxx_messageInfo_PieceHash proto.InternalMessageInfo

func (m *PieceHash) GetHash() []byte {
	if m != nil {
		return m.Hash
	}
	return nil
}

func (m *PieceHash) GetSignature() []byte {
	if m != nil {
		return m.Signature
	}
	return nil
}

func init() {
	proto.RegisterType((*OrderLimit2)(nil), "orders.OrderLimit2")
	proto.RegisterType((*Order2)(nil), "orders.Order2")
	proto.RegisterType((*PieceHash)(nil), "orders.PieceHash")
	proto.RegisterEnum("orders.PieceAction", PieceAction_name, PieceAction_value)
}

func init() { proto.RegisterFile("orders.proto", fileDescriptor_orders_ba7c18f81666afaa) }

var fileDescriptor_orders_ba7c18f81666afaa = []byte{
	// 489 bytes of a gzipped FileDescriptorProto
	// (raw gzipped descriptor bytes elided)
}
63 pkg/pb/orders.proto Normal file
@ -0,0 +1,63 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

syntax = "proto3";
option go_package = "pb";

package orders;

import "gogo.proto";
import "google/protobuf/timestamp.proto";

// PieceAction is an enumeration of all possible actions executed on a storage node
enum PieceAction {
	INVALID = 0;
	PUT = 1;
	GET = 2;
	GET_AUDIT = 3;
	GET_REPAIR = 4;
	PUT_REPAIR = 5;
	DELETE = 6;
}

// OrderLimit2 is provided by the satellite to allow a specific action on a storage node within some limits
message OrderLimit2 {
	// unique serial to avoid replay attacks
	bytes serial_number = 1; // can this be int64?
	// satellite that issued this order limit, allowing the orderer to do the specified action
	bytes satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
	// uplink that requested the order limit, or on whose behalf the action is done
	bytes uplink_id = 3 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
	// storage node that can claim the order limit specified by serial
	bytes storage_node_id = 4 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];

	// piece which is allowed to be touched
	bytes piece_id = 5 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
	// limit in bytes of how much can be changed
	int64 limit = 6;
	PieceAction action = 7;

	google.protobuf.Timestamp piece_expiration = 8;
	google.protobuf.Timestamp order_expiration = 9;

	bytes satellite_signature = 10;
}

// Order2 is one step of fulfilling Amount number of bytes from an OrderLimit2 with SerialNumber
message Order2 {
	// serial of the order limit that was signed
	bytes serial_number = 1;
	// amount to be signed for
	int64 amount = 2;
	// signature
	bytes uplink_signature = 3;
}

message PieceHash {
	// piece id
	bytes piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
	// hash of the piece that was/is uploaded
	bytes hash = 2;
	// signature from either the satellite or the storage node
	bytes signature = 3;
}
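
OrderLimit2 is the satellite-signed allowance and Order2 is the uplink-signed usage against it, so an uplink must never let the cumulative Amount exceed Limit. Below is a small sketch of that bookkeeping; it is an editor's illustration with an assumed signing callback and assumed cumulative-amount semantics, not code from this commit:

	package example

	import (
		"errors"

		"storj.io/storj/pkg/pb"
	)

	// nextOrder issues the next cumulative Order2 under an OrderLimit2,
	// refusing to exceed the satellite-imposed byte limit.
	func nextOrder(limit *pb.OrderLimit2, ordered, chunk int64, sign func(*pb.Order2) []byte) (*pb.Order2, error) {
		amount := ordered + chunk
		if amount > limit.Limit {
			return nil, errors.New("order would exceed the satellite-imposed limit")
		}
		order := &pb.Order2{
			SerialNumber: limit.SerialNumber, // ties the order to its limit
			Amount:       amount,             // cumulative bytes so far (assumed semantics)
		}
		order.UplinkSignature = sign(order) // the signing scheme is an assumption
		return order, nil
	}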
695 pkg/pb/piecestore2.pb.go Normal file
@ -0,0 +1,695 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: piecestore2.proto

package pb

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"

import (
	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

// Expected order of messages from uplink:
//   OrderLimit ->
//   repeated
//      Order ->
//      Chunk ->
//   PieceHash signed by uplink ->
//      <- PieceHash signed by storage node
//
type PieceUploadRequest struct {
	// first message to show that we are allowed to upload
	Limit *OrderLimit2 `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"`
	// order for uploading
	Order *Order2 `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"`
	Chunk *PieceUploadRequest_Chunk `protobuf:"bytes,3,opt,name=chunk,proto3" json:"chunk,omitempty"`
	// final message
	Done *PieceHash `protobuf:"bytes,4,opt,name=done,proto3" json:"done,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceUploadRequest) Reset() { *m = PieceUploadRequest{} }
func (m *PieceUploadRequest) String() string { return proto.CompactTextString(m) }
func (*PieceUploadRequest) ProtoMessage() {}
func (*PieceUploadRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{0}
}
func (m *PieceUploadRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceUploadRequest.Unmarshal(m, b)
}
func (m *PieceUploadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceUploadRequest.Marshal(b, m, deterministic)
}
func (dst *PieceUploadRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceUploadRequest.Merge(dst, src)
}
func (m *PieceUploadRequest) XXX_Size() int {
	return xxx_messageInfo_PieceUploadRequest.Size(m)
}
func (m *PieceUploadRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceUploadRequest.DiscardUnknown(m)
}

var xxx_messageInfo_PieceUploadRequest proto.InternalMessageInfo

func (m *PieceUploadRequest) GetLimit() *OrderLimit2 {
	if m != nil {
		return m.Limit
	}
	return nil
}

func (m *PieceUploadRequest) GetOrder() *Order2 {
	if m != nil {
		return m.Order
	}
	return nil
}

func (m *PieceUploadRequest) GetChunk() *PieceUploadRequest_Chunk {
	if m != nil {
		return m.Chunk
	}
	return nil
}

func (m *PieceUploadRequest) GetDone() *PieceHash {
	if m != nil {
		return m.Done
	}
	return nil
}

// data message
type PieceUploadRequest_Chunk struct {
	Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceUploadRequest_Chunk) Reset() { *m = PieceUploadRequest_Chunk{} }
func (m *PieceUploadRequest_Chunk) String() string { return proto.CompactTextString(m) }
func (*PieceUploadRequest_Chunk) ProtoMessage() {}
func (*PieceUploadRequest_Chunk) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{0, 0}
}
func (m *PieceUploadRequest_Chunk) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceUploadRequest_Chunk.Unmarshal(m, b)
}
func (m *PieceUploadRequest_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceUploadRequest_Chunk.Marshal(b, m, deterministic)
}
func (dst *PieceUploadRequest_Chunk) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceUploadRequest_Chunk.Merge(dst, src)
}
func (m *PieceUploadRequest_Chunk) XXX_Size() int {
	return xxx_messageInfo_PieceUploadRequest_Chunk.Size(m)
}
func (m *PieceUploadRequest_Chunk) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceUploadRequest_Chunk.DiscardUnknown(m)
}

var xxx_messageInfo_PieceUploadRequest_Chunk proto.InternalMessageInfo

func (m *PieceUploadRequest_Chunk) GetOffset() int64 {
	if m != nil {
		return m.Offset
	}
	return 0
}

func (m *PieceUploadRequest_Chunk) GetData() []byte {
	if m != nil {
		return m.Data
	}
	return nil
}

type PieceUploadResponse struct {
	Done *PieceHash `protobuf:"bytes,1,opt,name=done,proto3" json:"done,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceUploadResponse) Reset() { *m = PieceUploadResponse{} }
func (m *PieceUploadResponse) String() string { return proto.CompactTextString(m) }
func (*PieceUploadResponse) ProtoMessage() {}
func (*PieceUploadResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{1}
}
func (m *PieceUploadResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceUploadResponse.Unmarshal(m, b)
}
func (m *PieceUploadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceUploadResponse.Marshal(b, m, deterministic)
}
func (dst *PieceUploadResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceUploadResponse.Merge(dst, src)
}
func (m *PieceUploadResponse) XXX_Size() int {
	return xxx_messageInfo_PieceUploadResponse.Size(m)
}
func (m *PieceUploadResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceUploadResponse.DiscardUnknown(m)
}

var xxx_messageInfo_PieceUploadResponse proto.InternalMessageInfo

func (m *PieceUploadResponse) GetDone() *PieceHash {
	if m != nil {
		return m.Done
	}
	return nil
}

// Expected order of messages from uplink:
//   {OrderLimit, Chunk} ->
//   go repeated
//      Order -> (async)
//   go repeated
//      <- PieceDownloadResponse.Chunk
type PieceDownloadRequest struct {
	// first message to show that we are allowed to download
	Limit *OrderLimit2 `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"`
	// order for downloading
	Order *Order2 `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"`
	// request for the chunk
	Chunk *PieceDownloadRequest_Chunk `protobuf:"bytes,3,opt,name=chunk,proto3" json:"chunk,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceDownloadRequest) Reset() { *m = PieceDownloadRequest{} }
func (m *PieceDownloadRequest) String() string { return proto.CompactTextString(m) }
func (*PieceDownloadRequest) ProtoMessage() {}
func (*PieceDownloadRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{2}
}
func (m *PieceDownloadRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceDownloadRequest.Unmarshal(m, b)
}
func (m *PieceDownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceDownloadRequest.Marshal(b, m, deterministic)
}
func (dst *PieceDownloadRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceDownloadRequest.Merge(dst, src)
}
func (m *PieceDownloadRequest) XXX_Size() int {
	return xxx_messageInfo_PieceDownloadRequest.Size(m)
}
func (m *PieceDownloadRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceDownloadRequest.DiscardUnknown(m)
}

var xxx_messageInfo_PieceDownloadRequest proto.InternalMessageInfo

func (m *PieceDownloadRequest) GetLimit() *OrderLimit2 {
	if m != nil {
		return m.Limit
	}
	return nil
}

func (m *PieceDownloadRequest) GetOrder() *Order2 {
	if m != nil {
		return m.Order
	}
	return nil
}

func (m *PieceDownloadRequest) GetChunk() *PieceDownloadRequest_Chunk {
	if m != nil {
		return m.Chunk
	}
	return nil
}

// Chunk that we wish to download
type PieceDownloadRequest_Chunk struct {
	Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
	ChunkSize int64 `protobuf:"varint,2,opt,name=chunk_size,json=chunkSize,proto3" json:"chunk_size,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceDownloadRequest_Chunk) Reset() { *m = PieceDownloadRequest_Chunk{} }
func (m *PieceDownloadRequest_Chunk) String() string { return proto.CompactTextString(m) }
func (*PieceDownloadRequest_Chunk) ProtoMessage() {}
func (*PieceDownloadRequest_Chunk) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{2, 0}
}
func (m *PieceDownloadRequest_Chunk) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceDownloadRequest_Chunk.Unmarshal(m, b)
}
func (m *PieceDownloadRequest_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceDownloadRequest_Chunk.Marshal(b, m, deterministic)
}
func (dst *PieceDownloadRequest_Chunk) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceDownloadRequest_Chunk.Merge(dst, src)
}
func (m *PieceDownloadRequest_Chunk) XXX_Size() int {
	return xxx_messageInfo_PieceDownloadRequest_Chunk.Size(m)
}
func (m *PieceDownloadRequest_Chunk) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceDownloadRequest_Chunk.DiscardUnknown(m)
}

var xxx_messageInfo_PieceDownloadRequest_Chunk proto.InternalMessageInfo

func (m *PieceDownloadRequest_Chunk) GetOffset() int64 {
	if m != nil {
		return m.Offset
	}
	return 0
}

func (m *PieceDownloadRequest_Chunk) GetChunkSize() int64 {
	if m != nil {
		return m.ChunkSize
	}
	return 0
}

type PieceDownloadResponse struct {
	Chunk *PieceDownloadResponse_Chunk `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceDownloadResponse) Reset() { *m = PieceDownloadResponse{} }
func (m *PieceDownloadResponse) String() string { return proto.CompactTextString(m) }
func (*PieceDownloadResponse) ProtoMessage() {}
func (*PieceDownloadResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{3}
}
func (m *PieceDownloadResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceDownloadResponse.Unmarshal(m, b)
}
func (m *PieceDownloadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceDownloadResponse.Marshal(b, m, deterministic)
}
func (dst *PieceDownloadResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceDownloadResponse.Merge(dst, src)
}
func (m *PieceDownloadResponse) XXX_Size() int {
	return xxx_messageInfo_PieceDownloadResponse.Size(m)
}
func (m *PieceDownloadResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceDownloadResponse.DiscardUnknown(m)
}

var xxx_messageInfo_PieceDownloadResponse proto.InternalMessageInfo

func (m *PieceDownloadResponse) GetChunk() *PieceDownloadResponse_Chunk {
	if m != nil {
		return m.Chunk
	}
	return nil
}

// Chunk response for download request
type PieceDownloadResponse_Chunk struct {
	Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceDownloadResponse_Chunk) Reset() { *m = PieceDownloadResponse_Chunk{} }
func (m *PieceDownloadResponse_Chunk) String() string { return proto.CompactTextString(m) }
func (*PieceDownloadResponse_Chunk) ProtoMessage() {}
func (*PieceDownloadResponse_Chunk) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{3, 0}
}
func (m *PieceDownloadResponse_Chunk) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceDownloadResponse_Chunk.Unmarshal(m, b)
}
func (m *PieceDownloadResponse_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceDownloadResponse_Chunk.Marshal(b, m, deterministic)
}
func (dst *PieceDownloadResponse_Chunk) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceDownloadResponse_Chunk.Merge(dst, src)
}
func (m *PieceDownloadResponse_Chunk) XXX_Size() int {
	return xxx_messageInfo_PieceDownloadResponse_Chunk.Size(m)
}
func (m *PieceDownloadResponse_Chunk) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceDownloadResponse_Chunk.DiscardUnknown(m)
}

var xxx_messageInfo_PieceDownloadResponse_Chunk proto.InternalMessageInfo

func (m *PieceDownloadResponse_Chunk) GetOffset() int64 {
	if m != nil {
		return m.Offset
	}
	return 0
}

func (m *PieceDownloadResponse_Chunk) GetData() []byte {
	if m != nil {
		return m.Data
	}
	return nil
}
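
// The download handshake follows the comment above PieceDownloadRequest: the
// first message carries the OrderLimit2 and the requested Chunk range, Orders
// are then sent asynchronously, and Chunk responses stream back. A minimal
// client-side sketch (editor's illustration only, not generated API):
//
//	stream, err := client.Download(ctx)
//	if err != nil {
//		return err
//	}
//	// first message: the limit plus the byte range we want
//	err = stream.Send(&PieceDownloadRequest{
//		Limit: limit,
//		Chunk: &PieceDownloadRequest_Chunk{Offset: 0, ChunkSize: 64 * 1024},
//	})
//	// repeatedly: send an Order covering what we are about to read...
//	err = stream.Send(&PieceDownloadRequest{Order: order})
//	// ...and receive the corresponding data
//	resp, err := stream.Recv()
//	// resp.Chunk.Offset and resp.Chunk.Data hold the returned bytes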

type PieceDeleteRequest struct {
	Limit *OrderLimit2 `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceDeleteRequest) Reset() { *m = PieceDeleteRequest{} }
func (m *PieceDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*PieceDeleteRequest) ProtoMessage() {}
func (*PieceDeleteRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{4}
}
func (m *PieceDeleteRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceDeleteRequest.Unmarshal(m, b)
}
func (m *PieceDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceDeleteRequest.Marshal(b, m, deterministic)
}
func (dst *PieceDeleteRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceDeleteRequest.Merge(dst, src)
}
func (m *PieceDeleteRequest) XXX_Size() int {
	return xxx_messageInfo_PieceDeleteRequest.Size(m)
}
func (m *PieceDeleteRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceDeleteRequest.DiscardUnknown(m)
}

var xxx_messageInfo_PieceDeleteRequest proto.InternalMessageInfo

func (m *PieceDeleteRequest) GetLimit() *OrderLimit2 {
	if m != nil {
		return m.Limit
	}
	return nil
}

type PieceDeleteResponse struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *PieceDeleteResponse) Reset() { *m = PieceDeleteResponse{} }
func (m *PieceDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*PieceDeleteResponse) ProtoMessage() {}
func (*PieceDeleteResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_piecestore2_6f4182b059c6fda3, []int{5}
}
func (m *PieceDeleteResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PieceDeleteResponse.Unmarshal(m, b)
}
func (m *PieceDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PieceDeleteResponse.Marshal(b, m, deterministic)
}
func (dst *PieceDeleteResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PieceDeleteResponse.Merge(dst, src)
}
func (m *PieceDeleteResponse) XXX_Size() int {
	return xxx_messageInfo_PieceDeleteResponse.Size(m)
}
func (m *PieceDeleteResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PieceDeleteResponse.DiscardUnknown(m)
}

var xxx_messageInfo_PieceDeleteResponse proto.InternalMessageInfo

func init() {
	proto.RegisterType((*PieceUploadRequest)(nil), "piecestore.PieceUploadRequest")
	proto.RegisterType((*PieceUploadRequest_Chunk)(nil), "piecestore.PieceUploadRequest.Chunk")
	proto.RegisterType((*PieceUploadResponse)(nil), "piecestore.PieceUploadResponse")
	proto.RegisterType((*PieceDownloadRequest)(nil), "piecestore.PieceDownloadRequest")
	proto.RegisterType((*PieceDownloadRequest_Chunk)(nil), "piecestore.PieceDownloadRequest.Chunk")
	proto.RegisterType((*PieceDownloadResponse)(nil), "piecestore.PieceDownloadResponse")
	proto.RegisterType((*PieceDownloadResponse_Chunk)(nil), "piecestore.PieceDownloadResponse.Chunk")
	proto.RegisterType((*PieceDeleteRequest)(nil), "piecestore.PieceDeleteRequest")
	proto.RegisterType((*PieceDeleteResponse)(nil), "piecestore.PieceDeleteResponse")
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// PiecestoreClient is the client API for Piecestore service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type PiecestoreClient interface {
	Upload(ctx context.Context, opts ...grpc.CallOption) (Piecestore_UploadClient, error)
	Download(ctx context.Context, opts ...grpc.CallOption) (Piecestore_DownloadClient, error)
	Delete(ctx context.Context, in *PieceDeleteRequest, opts ...grpc.CallOption) (*PieceDeleteResponse, error)
}

type piecestoreClient struct {
	cc *grpc.ClientConn
}

func NewPiecestoreClient(cc *grpc.ClientConn) PiecestoreClient {
	return &piecestoreClient{cc}
}

func (c *piecestoreClient) Upload(ctx context.Context, opts ...grpc.CallOption) (Piecestore_UploadClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Piecestore_serviceDesc.Streams[0], "/piecestore.Piecestore/Upload", opts...)
	if err != nil {
		return nil, err
	}
	x := &piecestoreUploadClient{stream}
	return x, nil
}

type Piecestore_UploadClient interface {
	Send(*PieceUploadRequest) error
	CloseAndRecv() (*PieceUploadResponse, error)
	grpc.ClientStream
}

type piecestoreUploadClient struct {
	grpc.ClientStream
}

func (x *piecestoreUploadClient) Send(m *PieceUploadRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *piecestoreUploadClient) CloseAndRecv() (*PieceUploadResponse, error) {
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	m := new(PieceUploadResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *piecestoreClient) Download(ctx context.Context, opts ...grpc.CallOption) (Piecestore_DownloadClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Piecestore_serviceDesc.Streams[1], "/piecestore.Piecestore/Download", opts...)
	if err != nil {
		return nil, err
	}
	x := &piecestoreDownloadClient{stream}
	return x, nil
}

type Piecestore_DownloadClient interface {
	Send(*PieceDownloadRequest) error
	Recv() (*PieceDownloadResponse, error)
	grpc.ClientStream
}

type piecestoreDownloadClient struct {
	grpc.ClientStream
}

func (x *piecestoreDownloadClient) Send(m *PieceDownloadRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *piecestoreDownloadClient) Recv() (*PieceDownloadResponse, error) {
	m := new(PieceDownloadResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *piecestoreClient) Delete(ctx context.Context, in *PieceDeleteRequest, opts ...grpc.CallOption) (*PieceDeleteResponse, error) {
	out := new(PieceDeleteResponse)
	err := c.cc.Invoke(ctx, "/piecestore.Piecestore/Delete", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// PiecestoreServer is the server API for Piecestore service.
|
||||
type PiecestoreServer interface {
|
||||
Upload(Piecestore_UploadServer) error
|
||||
Download(Piecestore_DownloadServer) error
|
||||
Delete(context.Context, *PieceDeleteRequest) (*PieceDeleteResponse, error)
|
||||
}
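
// Illustrative sketch (editor's addition, not part of the generated bindings):
// a minimal PiecestoreServer.Upload implementation showing the client-streaming
// contract above. The GetChunk/GetDone accessors and the Chunk/Done fields are
// assumed from piecestore2.proto below; `persist` is a hypothetical callback,
// and a real handler would also validate the order limit and sign its own
// piece hash. Assumes "io" is imported.
func sketchUploadHandler(stream Piecestore_UploadServer, persist func(offset int64, data []byte) error) error {
    for {
        msg, err := stream.Recv()
        if err == io.EOF {
            // the uplink closed the stream before sending the final PieceHash
            return io.ErrUnexpectedEOF
        }
        if err != nil {
            return err
        }
        if chunk := msg.GetChunk(); chunk != nil {
            // store the chunk data at the requested offset
            if err := persist(chunk.Offset, chunk.Data); err != nil {
                return err
            }
        }
        if done := msg.GetDone(); done != nil {
            // final message: reply with a piece hash and close the stream
            return stream.SendAndClose(&PieceUploadResponse{Done: done})
        }
    }
}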

func RegisterPiecestoreServer(s *grpc.Server, srv PiecestoreServer) {
    s.RegisterService(&_Piecestore_serviceDesc, srv)
}

func _Piecestore_Upload_Handler(srv interface{}, stream grpc.ServerStream) error {
    return srv.(PiecestoreServer).Upload(&piecestoreUploadServer{stream})
}

type Piecestore_UploadServer interface {
    SendAndClose(*PieceUploadResponse) error
    Recv() (*PieceUploadRequest, error)
    grpc.ServerStream
}

type piecestoreUploadServer struct {
    grpc.ServerStream
}

func (x *piecestoreUploadServer) SendAndClose(m *PieceUploadResponse) error {
    return x.ServerStream.SendMsg(m)
}

func (x *piecestoreUploadServer) Recv() (*PieceUploadRequest, error) {
    m := new(PieceUploadRequest)
    if err := x.ServerStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

func _Piecestore_Download_Handler(srv interface{}, stream grpc.ServerStream) error {
    return srv.(PiecestoreServer).Download(&piecestoreDownloadServer{stream})
}

type Piecestore_DownloadServer interface {
    Send(*PieceDownloadResponse) error
    Recv() (*PieceDownloadRequest, error)
    grpc.ServerStream
}

type piecestoreDownloadServer struct {
    grpc.ServerStream
}

func (x *piecestoreDownloadServer) Send(m *PieceDownloadResponse) error {
    return x.ServerStream.SendMsg(m)
}

func (x *piecestoreDownloadServer) Recv() (*PieceDownloadRequest, error) {
    m := new(PieceDownloadRequest)
    if err := x.ServerStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

func _Piecestore_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(PieceDeleteRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(PiecestoreServer).Delete(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/piecestore.Piecestore/Delete",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(PiecestoreServer).Delete(ctx, req.(*PieceDeleteRequest))
    }
    return interceptor(ctx, in, info, handler)
}

var _Piecestore_serviceDesc = grpc.ServiceDesc{
    ServiceName: "piecestore.Piecestore",
    HandlerType: (*PiecestoreServer)(nil),
    Methods: []grpc.MethodDesc{
        {
            MethodName: "Delete",
            Handler:    _Piecestore_Delete_Handler,
        },
    },
    Streams: []grpc.StreamDesc{
        {
            StreamName:    "Upload",
            Handler:       _Piecestore_Upload_Handler,
            ClientStreams: true,
        },
        {
            StreamName:    "Download",
            Handler:       _Piecestore_Download_Handler,
            ServerStreams: true,
            ClientStreams: true,
        },
    },
    Metadata: "piecestore2.proto",
}

func init() { proto.RegisterFile("piecestore2.proto", fileDescriptor_piecestore2_6f4182b059c6fda3) }

var fileDescriptor_piecestore2_6f4182b059c6fda3 = []byte{
    // 404 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x51, 0x4f, 0xea, 0x30,
    0x14, 0xc7, 0x29, 0x6c, 0xcb, 0xbd, 0xe7, 0x92, 0x9b, 0x50, 0xc4, 0x2c, 0x4b, 0x54, 0x5c, 0x50,
    0xf1, 0x65, 0x31, 0xe3, 0xcd, 0xa0, 0x26, 0xca, 0x83, 0x89, 0x1a, 0x49, 0x0d, 0x2f, 0xbe, 0x98,
    0xc1, 0x0a, 0x2c, 0xe2, 0x3a, 0xd7, 0x11, 0x13, 0xbe, 0x82, 0x9f, 0xd3, 0x8f, 0x61, 0x34, 0x6b,
    0x37, 0xc8, 0x00, 0x21, 0x9a, 0xf8, 0x04, 0x3d, 0xe7, 0x7f, 0xfa, 0xff, 0xf5, 0xdf, 0x66, 0x50,
    0x0a, 0x3c, 0xda, 0xa3, 0x3c, 0x62, 0x21, 0xb5, 0xad, 0x20, 0x64, 0x11, 0xc3, 0x30, 0x2b, 0x19,
    0x30, 0x60, 0x03, 0x26, 0xeb, 0x46, 0x91, 0x85, 0x2e, 0x0d, 0xb9, 0x5c, 0x99, 0xef, 0x08, 0x70,
    0x3b, 0x16, 0x76, 0x82, 0x11, 0x73, 0x5c, 0x42, 0x9f, 0xc7, 0x94, 0x47, 0xf8, 0x10, 0xd4, 0x91,
    0xf7, 0xe4, 0x45, 0x3a, 0xaa, 0xa2, 0xfa, 0x3f, 0xbb, 0x6c, 0x25, 0x43, 0xb7, 0xf1, 0xcf, 0x75,
    0xdc, 0xb1, 0x89, 0x54, 0xe0, 0x1a, 0xa8, 0xa2, 0xa9, 0xe7, 0x85, 0xf4, 0x7f, 0x46, 0x6a, 0x13,
    0xd9, 0xc4, 0xc7, 0xa0, 0xf6, 0x86, 0x63, 0xff, 0x51, 0x2f, 0x08, 0x55, 0xcd, 0x9a, 0xd1, 0x59,
    0x8b, 0xfe, 0xd6, 0x45, 0xac, 0x25, 0x72, 0x04, 0xef, 0x81, 0xe2, 0x32, 0x9f, 0xea, 0x8a, 0x18,
    0x2d, 0xa5, 0x06, 0x62, 0xec, 0xd2, 0xe1, 0x43, 0x22, 0xda, 0x46, 0x03, 0x54, 0x31, 0x86, 0x37,
    0x41, 0x63, 0xfd, 0x3e, 0xa7, 0x92, 0xbe, 0x40, 0x92, 0x15, 0xc6, 0xa0, 0xb8, 0x4e, 0xe4, 0x08,
    0xd0, 0x22, 0x11, 0xff, 0xcd, 0x26, 0x94, 0x33, 0xf6, 0x3c, 0x60, 0x3e, 0xa7, 0x53, 0x4b, 0xb4,
    0xd2, 0xd2, 0x7c, 0x43, 0xb0, 0x21, 0x6a, 0x2d, 0xf6, 0xe2, 0xff, 0x6a, 0x7e, 0xcd, 0x6c, 0x7e,
    0xfb, 0x0b, 0xf9, 0xcd, 0x11, 0x64, 0x12, 0x34, 0x4e, 0xd7, 0x45, 0xb3, 0x05, 0x20, 0x94, 0x0f,
    0xdc, 0x9b, 0x50, 0x41, 0x52, 0x20, 0x7f, 0x45, 0xe5, 0xce, 0x9b, 0x50, 0xf3, 0x15, 0x41, 0x65,
    0xce, 0x25, 0x09, 0xea, 0x24, 0xe5, 0x92, 0x07, 0x3d, 0x58, 0xc1, 0x25, 0x27, 0xb2, 0x60, 0x3f,
    0xba, 0xb3, 0xb3, 0xe4, 0xc9, 0xb6, 0xe8, 0x88, 0x46, 0xf4, 0xfb, 0x91, 0x9b, 0x95, 0xe4, 0xd2,
    0xd3, 0x0d, 0x24, 0x99, 0xfd, 0x81, 0x00, 0xda, 0x53, 0x7c, 0x7c, 0x03, 0x9a, 0x7c, 0x15, 0x78,
    0x7b, 0xf5, 0x6b, 0x35, 0x76, 0xbe, 0xec, 0xcb, 0x9d, 0xcd, 0x5c, 0x1d, 0xe1, 0x0e, 0xfc, 0x49,
    0xb3, 0xc0, 0xd5, 0x75, 0xd7, 0x67, 0xec, 0xae, 0x0d, 0x32, 0xde, 0xf4, 0x08, 0xe1, 0x2b, 0xd0,
    0xe4, 0x31, 0x96, 0x50, 0x66, 0x02, 0x5a, 0x42, 0x99, 0x3d, 0xbf, 0x99, 0x3b, 0x57, 0xee, 0xf3,
    0x41, 0xb7, 0xab, 0x89, 0x4f, 0x43, 0xe3, 0x33, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xfa, 0xf7, 0xbe,
    0x55, 0x04, 0x00, 0x00,
}
pkg/pb/piecestore2.proto (new file, 82 lines)
@ -0,0 +1,82 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

syntax = "proto3";
option go_package = "pb";

package piecestore;

import "gogo.proto";
import "orders.proto";

service Piecestore {
  rpc Upload(stream PieceUploadRequest) returns (PieceUploadResponse) {}
  rpc Download(stream PieceDownloadRequest) returns (stream PieceDownloadResponse) {}
  rpc Delete(PieceDeleteRequest) returns (PieceDeleteResponse) {}
}

// Expected order of messages from uplink:
//   OrderLimit ->
//   repeated
//     Order ->
//     Chunk ->
//   PieceHash signed by uplink ->
//     <- PieceHash signed by storage node
//
message PieceUploadRequest {
  // first message to show that we are allowed to upload
  orders.OrderLimit2 limit = 1;
  // order for uploading
  orders.Order2 order = 2;

  // data message
  message Chunk {
    int64 offset = 1;
    bytes data = 2;
  }
  Chunk chunk = 3;
  // final message
  orders.PieceHash done = 4;
}

message PieceUploadResponse {
  orders.PieceHash done = 1;
}
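
Illustrative sketch (editor's addition, not part of this commit): driving the upload sequence documented above through the generated Go bindings. The names pb.PiecestoreClient, pb.PieceUploadRequest, pb.PieceUploadRequest_Chunk, pb.OrderLimit2, pb.Order2 and pb.PieceHash are assumed from this proto; a real uplink interleaves Orders with Chunks as bandwidth is consumed.

func sketchUpload(ctx context.Context, client pb.PiecestoreClient, limit *pb.OrderLimit2, order *pb.Order2, data []byte, uplinkHash *pb.PieceHash) (*pb.PieceHash, error) {
    stream, err := client.Upload(ctx)
    if err != nil {
        return nil, err
    }
    // first message proves the uplink is allowed to upload
    if err := stream.Send(&pb.PieceUploadRequest{Limit: limit}); err != nil {
        return nil, err
    }
    // repeated part: an Order extends the allocation, a Chunk carries data
    if err := stream.Send(&pb.PieceUploadRequest{Order: order}); err != nil {
        return nil, err
    }
    if err := stream.Send(&pb.PieceUploadRequest{Chunk: &pb.PieceUploadRequest_Chunk{Offset: 0, Data: data}}); err != nil {
        return nil, err
    }
    // final message: the uplink-signed PieceHash
    if err := stream.Send(&pb.PieceUploadRequest{Done: uplinkHash}); err != nil {
        return nil, err
    }
    // the storage node answers with a PieceHash signed by itself
    resp, err := stream.CloseAndRecv()
    if err != nil {
        return nil, err
    }
    return resp.GetDone(), nil
}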

// Expected order of messages from uplink:
//   {OrderLimit, Chunk} ->
//   go repeated
//     Order -> (async)
//   go repeated
//     <- PieceDownloadResponse.Chunk
message PieceDownloadRequest {
  // first message to show that we are allowed to download
  orders.OrderLimit2 limit = 1;
  // order for downloading
  orders.Order2 order = 2;

  // Chunk that we wish to download
  message Chunk {
    int64 offset = 1;
    int64 chunk_size = 2;
  }

  // request for the chunk
  Chunk chunk = 3;
}

message PieceDownloadResponse {
  // Chunk response for download request
  message Chunk {
    int64 offset = 1;
    bytes data = 2;
  }
  Chunk chunk = 1;
}
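
A matching sketch (same assumed Go names as in the upload sketch above) for the download flow: the first request carries the OrderLimit together with the chunk range, further Orders follow as bandwidth is consumed, and the storage node streams the data back as PieceDownloadResponse.Chunk messages.

func sketchDownload(ctx context.Context, client pb.PiecestoreClient, limit *pb.OrderLimit2, order *pb.Order2, offset, size int64) ([]byte, error) {
    stream, err := client.Download(ctx)
    if err != nil {
        return nil, err
    }
    // first message: OrderLimit plus the chunk we wish to download
    if err := stream.Send(&pb.PieceDownloadRequest{
        Limit: limit,
        Chunk: &pb.PieceDownloadRequest_Chunk{Offset: offset, ChunkSize: size},
    }); err != nil {
        return nil, err
    }
    // orders for additional bandwidth are sent as needed (async in practice)
    if err := stream.Send(&pb.PieceDownloadRequest{Order: order}); err != nil {
        return nil, err
    }
    buf := make([]byte, 0, size)
    for int64(len(buf)) < size {
        resp, err := stream.Recv() // each response carries one Chunk of data
        if err != nil {
            return nil, err
        }
        buf = append(buf, resp.GetChunk().GetData()...)
    }
    return buf, nil
}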

message PieceDeleteRequest {
  orders.OrderLimit2 limit = 1;
}

message PieceDeleteResponse {
}
@ -3,14 +3,15 @@

package pb

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"

import (
    context "context"
    fmt "fmt"
    _ "github.com/gogo/protobuf/gogoproto"
    proto "github.com/gogo/protobuf/proto"
    timestamp "github.com/golang/protobuf/ptypes/timestamp"
    context "golang.org/x/net/context"
    grpc "google.golang.org/grpc"
    math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
@ -33,7 +34,6 @@ const (
var RedundancyScheme_SchemeType_name = map[int32]string{
    0: "RS",
}

var RedundancyScheme_SchemeType_value = map[string]int32{
    "RS": 0,
}
@ -41,9 +41,8 @@ var RedundancyScheme_SchemeType_value = map[string]int32{
func (x RedundancyScheme_SchemeType) String() string {
    return proto.EnumName(RedundancyScheme_SchemeType_name, int32(x))
}

func (RedundancyScheme_SchemeType) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{0, 0}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{0, 0}
}

type Pointer_DataType int32
@ -57,7 +56,6 @@ var Pointer_DataType_name = map[int32]string{
    0: "INLINE",
    1: "REMOTE",
}

var Pointer_DataType_value = map[string]int32{
    "INLINE": 0,
    "REMOTE": 1,
@ -66,9 +64,8 @@ var Pointer_DataType_value = map[string]int32{
func (x Pointer_DataType) String() string {
    return proto.EnumName(Pointer_DataType_name, int32(x))
}

func (Pointer_DataType) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{3, 0}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{3, 0}
}

type RedundancyScheme struct {
@ -88,7 +85,7 @@ func (m *RedundancyScheme) Reset() { *m = RedundancyScheme{} }
func (m *RedundancyScheme) String() string { return proto.CompactTextString(m) }
func (*RedundancyScheme) ProtoMessage() {}
func (*RedundancyScheme) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{0}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{0}
}
func (m *RedundancyScheme) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_RedundancyScheme.Unmarshal(m, b)
@ -96,8 +93,8 @@ func (m *RedundancyScheme) XXX_Unmarshal(b []byte) error {
func (m *RedundancyScheme) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_RedundancyScheme.Marshal(b, m, deterministic)
}
func (m *RedundancyScheme) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RedundancyScheme.Merge(m, src)
func (dst *RedundancyScheme) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RedundancyScheme.Merge(dst, src)
}
func (m *RedundancyScheme) XXX_Size() int {
    return xxx_messageInfo_RedundancyScheme.Size(m)
@ -151,19 +148,19 @@ func (m *RedundancyScheme) GetErasureShareSize() int32 {
}

type RemotePiece struct {
    PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"`
    NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
    Hash *SignedHash `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
    PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"`
    NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
    Hash *PieceHash `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *RemotePiece) Reset() { *m = RemotePiece{} }
func (m *RemotePiece) String() string { return proto.CompactTextString(m) }
func (*RemotePiece) ProtoMessage() {}
func (*RemotePiece) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{1}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{1}
}
func (m *RemotePiece) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_RemotePiece.Unmarshal(m, b)
@ -171,8 +168,8 @@ func (m *RemotePiece) XXX_Unmarshal(b []byte) error {
func (m *RemotePiece) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_RemotePiece.Marshal(b, m, deterministic)
}
func (m *RemotePiece) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RemotePiece.Merge(m, src)
func (dst *RemotePiece) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RemotePiece.Merge(dst, src)
}
func (m *RemotePiece) XXX_Size() int {
    return xxx_messageInfo_RemotePiece.Size(m)
@ -190,7 +187,7 @@ func (m *RemotePiece) GetPieceNum() int32 {
    return 0
}

func (m *RemotePiece) GetHash() *SignedHash {
func (m *RemotePiece) GetHash() *PieceHash {
    if m != nil {
        return m.Hash
    }
@ -198,21 +195,20 @@ func (m *RemotePiece) GetHash() *SignedHash {
}

type RemoteSegment struct {
    Redundancy *RedundancyScheme `protobuf:"bytes,1,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
    // TODO: may want to use customtype and fixed-length byte slice
    PieceId string `protobuf:"bytes,2,opt,name=piece_id,json=pieceId,proto3" json:"piece_id,omitempty"`
    RemotePieces []*RemotePiece `protobuf:"bytes,3,rep,name=remote_pieces,json=remotePieces,proto3" json:"remote_pieces,omitempty"`
    MerkleRoot []byte `protobuf:"bytes,4,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
    Redundancy *RedundancyScheme `protobuf:"bytes,1,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
    RootPieceId PieceID `protobuf:"bytes,2,opt,name=root_piece_id,json=rootPieceId,proto3,customtype=PieceID" json:"root_piece_id"`
    RemotePieces []*RemotePiece `protobuf:"bytes,3,rep,name=remote_pieces,json=remotePieces,proto3" json:"remote_pieces,omitempty"`
    MerkleRoot []byte `protobuf:"bytes,4,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *RemoteSegment) Reset() { *m = RemoteSegment{} }
func (m *RemoteSegment) String() string { return proto.CompactTextString(m) }
func (*RemoteSegment) ProtoMessage() {}
func (*RemoteSegment) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{2}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{2}
}
func (m *RemoteSegment) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_RemoteSegment.Unmarshal(m, b)
@ -220,8 +216,8 @@ func (m *RemoteSegment) XXX_Unmarshal(b []byte) error {
func (m *RemoteSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_RemoteSegment.Marshal(b, m, deterministic)
}
func (m *RemoteSegment) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RemoteSegment.Merge(m, src)
func (dst *RemoteSegment) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RemoteSegment.Merge(dst, src)
}
func (m *RemoteSegment) XXX_Size() int {
    return xxx_messageInfo_RemoteSegment.Size(m)
@ -239,13 +235,6 @@ func (m *RemoteSegment) GetRedundancy() *RedundancyScheme {
    return nil
}

func (m *RemoteSegment) GetPieceId() string {
    if m != nil {
        return m.PieceId
    }
    return ""
}

func (m *RemoteSegment) GetRemotePieces() []*RemotePiece {
    if m != nil {
        return m.RemotePieces
@ -277,7 +266,7 @@ func (m *Pointer) Reset() { *m = Pointer{} }
func (m *Pointer) String() string { return proto.CompactTextString(m) }
func (*Pointer) ProtoMessage() {}
func (*Pointer) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{3}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{3}
}
func (m *Pointer) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Pointer.Unmarshal(m, b)
@ -285,8 +274,8 @@ func (m *Pointer) XXX_Unmarshal(b []byte) error {
func (m *Pointer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Pointer.Marshal(b, m, deterministic)
}
func (m *Pointer) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Pointer.Merge(m, src)
func (dst *Pointer) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Pointer.Merge(dst, src)
}
func (m *Pointer) XXX_Size() int {
    return xxx_messageInfo_Pointer.Size(m)
@ -359,7 +348,7 @@ func (m *PutRequest) Reset() { *m = PutRequest{} }
func (m *PutRequest) String() string { return proto.CompactTextString(m) }
func (*PutRequest) ProtoMessage() {}
func (*PutRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{4}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{4}
}
func (m *PutRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_PutRequest.Unmarshal(m, b)
@ -367,8 +356,8 @@ func (m *PutRequest) XXX_Unmarshal(b []byte) error {
func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
}
func (m *PutRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PutRequest.Merge(m, src)
func (dst *PutRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PutRequest.Merge(dst, src)
}
func (m *PutRequest) XXX_Size() int {
    return xxx_messageInfo_PutRequest.Size(m)
@ -405,7 +394,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{5}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{5}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_GetRequest.Unmarshal(m, b)
@ -413,8 +402,8 @@ func (m *GetRequest) XXX_Unmarshal(b []byte) error {
func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
}
func (m *GetRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_GetRequest.Merge(m, src)
func (dst *GetRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_GetRequest.Merge(dst, src)
}
func (m *GetRequest) XXX_Size() int {
    return xxx_messageInfo_GetRequest.Size(m)
@ -449,7 +438,7 @@ func (m *ListRequest) Reset() { *m = ListRequest{} }
func (m *ListRequest) String() string { return proto.CompactTextString(m) }
func (*ListRequest) ProtoMessage() {}
func (*ListRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{6}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{6}
}
func (m *ListRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ListRequest.Unmarshal(m, b)
@ -457,8 +446,8 @@ func (m *ListRequest) XXX_Unmarshal(b []byte) error {
func (m *ListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ListRequest.Marshal(b, m, deterministic)
}
func (m *ListRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListRequest.Merge(m, src)
func (dst *ListRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListRequest.Merge(dst, src)
}
func (m *ListRequest) XXX_Size() int {
    return xxx_messageInfo_ListRequest.Size(m)
@ -522,7 +511,7 @@ func (m *PutResponse) Reset() { *m = PutResponse{} }
func (m *PutResponse) String() string { return proto.CompactTextString(m) }
func (*PutResponse) ProtoMessage() {}
func (*PutResponse) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{7}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{7}
}
func (m *PutResponse) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_PutResponse.Unmarshal(m, b)
@ -530,8 +519,8 @@ func (m *PutResponse) XXX_Unmarshal(b []byte) error {
func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
}
func (m *PutResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PutResponse.Merge(m, src)
func (dst *PutResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PutResponse.Merge(dst, src)
}
func (m *PutResponse) XXX_Size() int {
    return xxx_messageInfo_PutResponse.Size(m)
@ -557,7 +546,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{8}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{8}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_GetResponse.Unmarshal(m, b)
@ -565,8 +554,8 @@ func (m *GetResponse) XXX_Unmarshal(b []byte) error {
func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
}
func (m *GetResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_GetResponse.Merge(m, src)
func (dst *GetResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_GetResponse.Merge(dst, src)
}
func (m *GetResponse) XXX_Size() int {
    return xxx_messageInfo_GetResponse.Size(m)
@ -618,7 +607,7 @@ func (m *ListResponse) Reset() { *m = ListResponse{} }
func (m *ListResponse) String() string { return proto.CompactTextString(m) }
func (*ListResponse) ProtoMessage() {}
func (*ListResponse) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{9}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{9}
}
func (m *ListResponse) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ListResponse.Unmarshal(m, b)
@ -626,8 +615,8 @@ func (m *ListResponse) XXX_Unmarshal(b []byte) error {
func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic)
}
func (m *ListResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListResponse.Merge(m, src)
func (dst *ListResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListResponse.Merge(dst, src)
}
func (m *ListResponse) XXX_Size() int {
    return xxx_messageInfo_ListResponse.Size(m)
@ -665,7 +654,7 @@ func (m *ListResponse_Item) Reset() { *m = ListResponse_Item{} }
func (m *ListResponse_Item) String() string { return proto.CompactTextString(m) }
func (*ListResponse_Item) ProtoMessage() {}
func (*ListResponse_Item) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{9, 0}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{9, 0}
}
func (m *ListResponse_Item) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ListResponse_Item.Unmarshal(m, b)
@ -673,8 +662,8 @@ func (m *ListResponse_Item) XXX_Unmarshal(b []byte) error {
func (m *ListResponse_Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ListResponse_Item.Marshal(b, m, deterministic)
}
func (m *ListResponse_Item) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListResponse_Item.Merge(m, src)
func (dst *ListResponse_Item) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListResponse_Item.Merge(dst, src)
}
func (m *ListResponse_Item) XXX_Size() int {
    return xxx_messageInfo_ListResponse_Item.Size(m)
@ -717,7 +706,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{10}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{10}
}
func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
@ -725,8 +714,8 @@ func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
}
func (m *DeleteRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_DeleteRequest.Merge(m, src)
func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_DeleteRequest.Merge(dst, src)
}
func (m *DeleteRequest) XXX_Size() int {
    return xxx_messageInfo_DeleteRequest.Size(m)
@ -755,7 +744,7 @@ func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteResponse) ProtoMessage() {}
func (*DeleteResponse) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{11}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{11}
}
func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
@ -763,8 +752,8 @@ func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
}
func (m *DeleteResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_DeleteResponse.Merge(m, src)
func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_DeleteResponse.Merge(dst, src)
}
func (m *DeleteResponse) XXX_Size() int {
    return xxx_messageInfo_DeleteResponse.Size(m)
@ -790,7 +779,7 @@ func (m *IterateRequest) Reset() { *m = IterateRequest{} }
func (m *IterateRequest) String() string { return proto.CompactTextString(m) }
func (*IterateRequest) ProtoMessage() {}
func (*IterateRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{12}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{12}
}
func (m *IterateRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_IterateRequest.Unmarshal(m, b)
@ -798,8 +787,8 @@ func (m *IterateRequest) XXX_Unmarshal(b []byte) error {
func (m *IterateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_IterateRequest.Marshal(b, m, deterministic)
}
func (m *IterateRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_IterateRequest.Merge(m, src)
func (dst *IterateRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_IterateRequest.Merge(dst, src)
}
func (m *IterateRequest) XXX_Size() int {
    return xxx_messageInfo_IterateRequest.Size(m)
@ -849,7 +838,7 @@ func (m *PayerBandwidthAllocationRequest) Reset() { *m = PayerBandwidthA
func (m *PayerBandwidthAllocationRequest) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocationRequest) ProtoMessage() {}
func (*PayerBandwidthAllocationRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{13}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{13}
}
func (m *PayerBandwidthAllocationRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_PayerBandwidthAllocationRequest.Unmarshal(m, b)
@ -857,8 +846,8 @@ func (m *PayerBandwidthAllocationRequest) XXX_Unmarshal(b []byte) error {
func (m *PayerBandwidthAllocationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_PayerBandwidthAllocationRequest.Marshal(b, m, deterministic)
}
func (m *PayerBandwidthAllocationRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PayerBandwidthAllocationRequest.Merge(m, src)
func (dst *PayerBandwidthAllocationRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PayerBandwidthAllocationRequest.Merge(dst, src)
}
func (m *PayerBandwidthAllocationRequest) XXX_Size() int {
    return xxx_messageInfo_PayerBandwidthAllocationRequest.Size(m)
@ -887,7 +876,7 @@ func (m *PayerBandwidthAllocationResponse) Reset() { *m = PayerBandwidth
func (m *PayerBandwidthAllocationResponse) String() string { return proto.CompactTextString(m) }
func (*PayerBandwidthAllocationResponse) ProtoMessage() {}
func (*PayerBandwidthAllocationResponse) Descriptor() ([]byte, []int) {
    return fileDescriptor_75fef806d28fc810, []int{14}
    return fileDescriptor_pointerdb_859f3b4a3b954c14, []int{14}
}
func (m *PayerBandwidthAllocationResponse) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_PayerBandwidthAllocationResponse.Unmarshal(m, b)
@ -895,8 +884,8 @@ func (m *PayerBandwidthAllocationResponse) XXX_Unmarshal(b []byte) error {
func (m *PayerBandwidthAllocationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_PayerBandwidthAllocationResponse.Marshal(b, m, deterministic)
}
func (m *PayerBandwidthAllocationResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PayerBandwidthAllocationResponse.Merge(m, src)
func (dst *PayerBandwidthAllocationResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_PayerBandwidthAllocationResponse.Merge(dst, src)
}
func (m *PayerBandwidthAllocationResponse) XXX_Size() int {
    return xxx_messageInfo_PayerBandwidthAllocationResponse.Size(m)
@ -915,8 +904,6 @@ func (m *PayerBandwidthAllocationResponse) GetPba() *PayerBandwidthAllocation {
}

func init() {
    proto.RegisterEnum("pointerdb.RedundancyScheme_SchemeType", RedundancyScheme_SchemeType_name, RedundancyScheme_SchemeType_value)
    proto.RegisterEnum("pointerdb.Pointer_DataType", Pointer_DataType_name, Pointer_DataType_value)
    proto.RegisterType((*RedundancyScheme)(nil), "pointerdb.RedundancyScheme")
    proto.RegisterType((*RemotePiece)(nil), "pointerdb.RemotePiece")
    proto.RegisterType((*RemoteSegment)(nil), "pointerdb.RemoteSegment")
@ -933,82 +920,8 @@ func init() {
    proto.RegisterType((*IterateRequest)(nil), "pointerdb.IterateRequest")
    proto.RegisterType((*PayerBandwidthAllocationRequest)(nil), "pointerdb.PayerBandwidthAllocationRequest")
    proto.RegisterType((*PayerBandwidthAllocationResponse)(nil), "pointerdb.PayerBandwidthAllocationResponse")
}

func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_75fef806d28fc810) }

var fileDescriptor_75fef806d28fc810 = []byte{
    // 1114 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6f, 0x1b, 0x45,
    0x17, 0xee, 0xfa, 0x33, 0x3e, 0x6b, 0xa7, 0x7e, 0x47, 0x7d, 0xd3, 0xad, 0x5b, 0x94, 0x74, 0x11,
    0x50, 0xda, 0xca, 0xad, 0x4c, 0x25, 0x04, 0x05, 0xa1, 0x86, 0x84, 0x62, 0xa9, 0x0d, 0xd1, 0x24,
    0x57, 0x08, 0x69, 0x99, 0x78, 0x4f, 0xec, 0x11, 0xde, 0x8f, 0xce, 0xcc, 0x96, 0xa6, 0xf7, 0xfc,
    0x08, 0xfe, 0x09, 0x37, 0x5c, 0x22, 0xf1, 0x1b, 0xb8, 0xe8, 0x05, 0xe2, 0x67, 0x70, 0x81, 0xe6,
    0x63, 0xed, 0x75, 0x83, 0x53, 0x04, 0x37, 0xc9, 0x9e, 0x73, 0x9e, 0x73, 0xe6, 0xcc, 0x79, 0x9e,
    0x39, 0x86, 0xcb, 0x79, 0xc6, 0x53, 0x85, 0x22, 0x3e, 0x19, 0xe6, 0x22, 0x53, 0x19, 0xe9, 0x2c,
    0x1c, 0x83, 0xed, 0x69, 0x96, 0x4d, 0xe7, 0x78, 0xcf, 0x04, 0x4e, 0x8a, 0xd3, 0x7b, 0x8a, 0x27,
    0x28, 0x15, 0x4b, 0x72, 0x8b, 0x1d, 0xc0, 0x34, 0x9b, 0x66, 0xe5, 0x77, 0x9a, 0xc5, 0xe8, 0xbe,
    0xfb, 0x39, 0xc7, 0x09, 0x4a, 0x95, 0x09, 0xe7, 0x09, 0x7f, 0xac, 0x41, 0x9f, 0x62, 0x5c, 0xa4,
    0x31, 0x4b, 0x27, 0x67, 0x47, 0x93, 0x19, 0x26, 0x48, 0x3e, 0x86, 0x86, 0x3a, 0xcb, 0x31, 0xf0,
    0x76, 0xbc, 0x5b, 0x9b, 0xa3, 0x77, 0x87, 0xcb, 0x56, 0x5e, 0x87, 0x0e, 0xed, 0xbf, 0xe3, 0xb3,
    0x1c, 0xa9, 0xc9, 0x21, 0x57, 0xa1, 0x9d, 0xf0, 0x34, 0x12, 0xf8, 0x2c, 0xa8, 0xed, 0x78, 0xb7,
    0x9a, 0xb4, 0x95, 0xf0, 0x94, 0xe2, 0x33, 0x72, 0x05, 0x9a, 0x2a, 0x53, 0x6c, 0x1e, 0xd4, 0x8d,
    0xdb, 0x1a, 0xe4, 0x7d, 0xe8, 0x0b, 0xcc, 0x19, 0x17, 0x91, 0x9a, 0x09, 0x94, 0xb3, 0x6c, 0x1e,
    0x07, 0x0d, 0x03, 0xb8, 0x6c, 0xfd, 0xc7, 0xa5, 0x9b, 0xdc, 0x81, 0xff, 0xc9, 0x62, 0x32, 0x41,
    0x29, 0x2b, 0xd8, 0xa6, 0xc1, 0xf6, 0x5d, 0x60, 0x09, 0xbe, 0x0b, 0x04, 0x05, 0x93, 0x85, 0xc0,
    0x48, 0xce, 0x98, 0xfe, 0xcb, 0x5f, 0x62, 0xd0, 0xb2, 0x68, 0x17, 0x39, 0xd2, 0x81, 0x23, 0xfe,
    0x12, 0xc3, 0x2b, 0x00, 0xcb, 0x8b, 0x90, 0x16, 0xd4, 0xe8, 0x51, 0xff, 0x52, 0xf8, 0x83, 0x07,
    0x3e, 0xc5, 0x24, 0x53, 0x78, 0xa8, 0xc7, 0x46, 0xae, 0x43, 0xc7, 0xcc, 0x2f, 0x4a, 0x8b, 0xc4,
    0xcc, 0xa6, 0x49, 0x37, 0x8c, 0xe3, 0xa0, 0x48, 0xc8, 0x7b, 0xd0, 0xd6, 0x83, 0x8e, 0x78, 0x6c,
    0xee, 0xdd, 0xdd, 0xdd, 0xfc, 0xf5, 0xd5, 0xf6, 0xa5, 0xdf, 0x5e, 0x6d, 0xb7, 0x0e, 0xb2, 0x18,
    0xc7, 0x7b, 0xb4, 0xa5, 0xc3, 0xe3, 0x98, 0xdc, 0x87, 0xc6, 0x8c, 0xc9, 0x99, 0x19, 0x83, 0x3f,
    0xba, 0x31, 0x5c, 0x52, 0x22, 0xb2, 0x42, 0xa1, 0x1c, 0x1e, 0xf1, 0x69, 0x8a, 0xf1, 0x97, 0x4c,
    0xce, 0xa8, 0x41, 0x86, 0xbf, 0x78, 0xd0, 0xb3, 0x7d, 0x1c, 0xe1, 0x34, 0xc1, 0x54, 0x91, 0x87,
    0x00, 0x62, 0xc1, 0x84, 0x69, 0xc5, 0x1f, 0x5d, 0xbf, 0x80, 0x26, 0x5a, 0x81, 0x93, 0x6b, 0x60,
    0xbb, 0x2e, 0x5b, 0xed, 0xd0, 0xb6, 0xb1, 0xc7, 0x31, 0x79, 0x08, 0x3d, 0x61, 0x0e, 0x8a, 0x6c,
    0x57, 0x41, 0x7d, 0xa7, 0x7e, 0xcb, 0x1f, 0x6d, 0xad, 0x94, 0x5e, 0x0c, 0x84, 0x76, 0xc5, 0xd2,
    0x90, 0x64, 0x1b, 0xfc, 0x04, 0xc5, 0x77, 0x73, 0x8c, 0x44, 0x96, 0x29, 0xc3, 0x62, 0x97, 0x82,
    0x75, 0xd1, 0x2c, 0x53, 0xe1, 0x9f, 0x35, 0x68, 0x1f, 0xda, 0x42, 0xe4, 0xde, 0x8a, 0xc4, 0xaa,
    0xbd, 0x3b, 0xc4, 0x70, 0x8f, 0x29, 0x56, 0xd1, 0xd5, 0x3b, 0xb0, 0xc9, 0xd3, 0x39, 0x4f, 0x31,
    0x92, 0x76, 0x08, 0x66, 0x80, 0x5d, 0xda, 0xb3, 0xde, 0x72, 0x32, 0xf7, 0xa1, 0x65, 0x9b, 0x32,
    0xe7, 0xfb, 0xa3, 0xe0, 0x5c, 0xeb, 0x0e, 0x49, 0x1d, 0x8e, 0xdc, 0x84, 0xae, 0xab, 0x68, 0x35,
    0xa2, 0x15, 0x55, 0xa7, 0xbe, 0xf3, 0x69, 0x79, 0x90, 0xcf, 0xa0, 0x37, 0x11, 0xc8, 0x14, 0xcf,
    0xd2, 0x28, 0x66, 0xca, 0xea, 0xc8, 0x1f, 0x0d, 0x86, 0xf6, 0x1d, 0x0e, 0xcb, 0x77, 0x38, 0x3c,
    0x2e, 0xdf, 0x21, 0xed, 0x96, 0x09, 0x7b, 0x4c, 0x21, 0xf9, 0x1c, 0x2e, 0xe3, 0x8b, 0x9c, 0x8b,
    0x4a, 0x89, 0xf6, 0x1b, 0x4b, 0x6c, 0x2e, 0x53, 0x4c, 0x91, 0x01, 0x6c, 0x24, 0xa8, 0x58, 0xcc,
    0x14, 0x0b, 0x36, 0xcc, 0xdd, 0x17, 0x76, 0x18, 0xc2, 0x46, 0x39, 0x2f, 0x02, 0xd0, 0x1a, 0x1f,
    0x3c, 0x19, 0x1f, 0xec, 0xf7, 0x2f, 0xe9, 0x6f, 0xba, 0xff, 0xf4, 0xab, 0xe3, 0xfd, 0xbe, 0x17,
    0x1e, 0x00, 0x1c, 0x16, 0x8a, 0xe2, 0xb3, 0x02, 0xa5, 0x22, 0x04, 0x1a, 0x39, 0x53, 0x33, 0x43,
    0x40, 0x87, 0x9a, 0x6f, 0x72, 0x17, 0xda, 0x6e, 0x5a, 0x46, 0x18, 0xfe, 0x88, 0x9c, 0xe7, 0x85,
    0x96, 0x90, 0x70, 0x07, 0xe0, 0x31, 0x5e, 0x54, 0x2f, 0xfc, 0xc9, 0x03, 0xff, 0x09, 0x97, 0x0b,
    0xcc, 0x16, 0xb4, 0x72, 0x81, 0xa7, 0xfc, 0x85, 0x43, 0x39, 0x4b, 0x2b, 0x47, 0x2a, 0x26, 0x54,
    0xc4, 0x4e, 0xcb, 0xb3, 0x3b, 0x14, 0x8c, 0xeb, 0x91, 0xf6, 0x90, 0xb7, 0x00, 0x30, 0x8d, 0xa3,
    0x13, 0x3c, 0xcd, 0x04, 0x1a, 0xe2, 0x3b, 0xb4, 0x83, 0x69, 0xbc, 0x6b, 0x1c, 0xe4, 0x06, 0x74,
    0x04, 0x4e, 0x0a, 0x21, 0xf9, 0x73, 0xcb, 0xfb, 0x06, 0x5d, 0x3a, 0xf4, 0xe2, 0x99, 0xf3, 0x84,
    0x2b, 0xb7, 0x2b, 0xac, 0xa1, 0x4b, 0xea, 0xe9, 0x45, 0xa7, 0x73, 0x36, 0x95, 0x86, 0xd0, 0x36,
    0xed, 0x68, 0xcf, 0x17, 0xda, 0x11, 0xf6, 0xc0, 0x37, 0xc3, 0x92, 0x79, 0x96, 0x4a, 0x0c, 0x7f,
    0xf7, 0xc0, 0x37, 0x97, 0xb5, 0x76, 0x75, 0x52, 0xde, 0x1b, 0x27, 0x45, 0x76, 0xa0, 0xa9, 0x1f,
    0xbf, 0x0c, 0x6a, 0xe6, 0x39, 0xc1, 0xd0, 0xac, 0x64, 0xbd, 0x17, 0xa8, 0x0d, 0x90, 0x4f, 0xa0,
    0x9e, 0x9f, 0x30, 0xb7, 0x13, 0x6e, 0x9f, 0xdf, 0x09, 0x87, 0xec, 0x0c, 0xc5, 0x2e, 0x4b, 0xe3,
    0xef, 0x79, 0xac, 0x66, 0x8f, 0xe6, 0xf3, 0x6c, 0x62, 0x84, 0x41, 0x75, 0x1a, 0xd9, 0x87, 0x1e,
    0x2b, 0xd4, 0x2c, 0x13, 0xfc, 0xa5, 0xf1, 0x3a, 0xed, 0x6f, 0xaf, 0xdb, 0x2d, 0x4f, 0x51, 0x4a,
    0x36, 0x45, 0xba, 0x9a, 0x15, 0xfe, 0xec, 0x41, 0xd7, 0xd2, 0xe5, 0x6e, 0x39, 0x82, 0x26, 0x57,
    0x98, 0xc8, 0xc0, 0x33, 0x7d, 0xdf, 0xa8, 0xdc, 0xb1, 0x8a, 0x1b, 0x8e, 0x15, 0x26, 0xd4, 0x42,
    0xb5, 0x0e, 0x12, 0x4d, 0x52, 0xcd, 0xd0, 0x60, 0xbe, 0x07, 0x08, 0x0d, 0x0d, 0xf9, 0xef, 0x9a,
    0xd3, 0x2b, 0x98, 0xcb, 0xc8, 0x89, 0xa8, 0x6e, 0x8e, 0xd8, 0xe0, 0xf2, 0xd0, 0xd8, 0xe1, 0xdb,
    0xd0, 0xdb, 0xc3, 0x39, 0x2a, 0xbc, 0x48, 0x93, 0x7d, 0xd8, 0x2c, 0x41, 0x8e, 0x5b, 0x01, 0x9b,
    0x63, 0x85, 0x82, 0x2d, 0xf3, 0xd6, 0xe9, 0xf4, 0x0a, 0x34, 0x4f, 0xb9, 0x90, 0xca, 0x29, 0xd4,
    0x1a, 0x24, 0x80, 0xb6, 0x15, 0x1b, 0xba, 0x8e, 0x4a, 0xd3, 0x46, 0x9e, 0xa3, 0x8e, 0x34, 0xca,
    0x88, 0x31, 0xc3, 0x6f, 0x60, 0x7b, 0x2d, 0xa5, 0xae, 0x89, 0x8f, 0xa0, 0xc5, 0x26, 0x86, 0x4d,
    0xbb, 0x23, 0x6f, 0x9e, 0x67, 0x73, 0x99, 0x6d, 0x80, 0xd4, 0x25, 0x84, 0xdf, 0xc2, 0xce, 0xfa,
    0xea, 0x8e, 0x5b, 0xa7, 0x38, 0xef, 0x5f, 0x29, 0x6e, 0xf4, 0x47, 0x0d, 0x3a, 0x8e, 0x9c, 0xbd,
    0x5d, 0xf2, 0x00, 0xea, 0x87, 0x85, 0x22, 0xff, 0xaf, 0x32, 0xb7, 0xd8, 0x34, 0x83, 0xad, 0xd7,
    0xdd, 0xae, 0x83, 0x07, 0x50, 0x7f, 0x8c, 0xab, 0x59, 0xcb, 0x7d, 0xb2, 0x92, 0x55, 0x7d, 0x79,
    0x1f, 0x42, 0x43, 0x6b, 0x8f, 0x6c, 0x9d, 0x13, 0xa3, 0xcd, 0xbb, 0xba, 0x46, 0xa4, 0xe4, 0x53,
    0x68, 0x59, 0xe2, 0x49, 0xf5, 0x37, 0x61, 0x45, 0x30, 0x83, 0x6b, 0x7f, 0x13, 0x71, 0xe9, 0x12,
    0x82, 0x75, 0x23, 0x21, 0xb7, 0xab, 0x37, 0xbc, 0x98, 0xd6, 0xc1, 0x9d, 0x7f, 0x84, 0xb5, 0x87,
    0xee, 0x36, 0xbe, 0xae, 0xe5, 0x27, 0x27, 0x2d, 0xf3, 0xe3, 0xf0, 0xc1, 0x5f, 0x01, 0x00, 0x00,
    0xff, 0xff, 0x93, 0x3d, 0x6a, 0x1d, 0x13, 0x0a, 0x00, 0x00,

    proto.RegisterEnum("pointerdb.RedundancyScheme_SchemeType", RedundancyScheme_SchemeType_name, RedundancyScheme_SchemeType_value)
    proto.RegisterEnum("pointerdb.Pointer_DataType", Pointer_DataType_name, Pointer_DataType_value)
}

// Reference imports to suppress errors if they are not otherwise used.
@ -1224,3 +1137,80 @@ var _PointerDB_serviceDesc = grpc.ServiceDesc{
    Streams: []grpc.StreamDesc{},
    Metadata: "pointerdb.proto",
}

func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_pointerdb_859f3b4a3b954c14) }

var fileDescriptor_pointerdb_859f3b4a3b954c14 = []byte{
    // 1129 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x6f, 0x6f, 0x1b, 0xc5,
    0x13, 0xae, 0xff, 0xc7, 0x73, 0x76, 0xe2, 0xae, 0xfa, 0x4b, 0xfd, 0x73, 0x8b, 0x92, 0x1e, 0x2a,
    0x94, 0xb6, 0x72, 0x91, 0x5b, 0x09, 0x41, 0x41, 0xa8, 0x21, 0xa1, 0x58, 0x6a, 0x43, 0xb4, 0xc9,
    0x2b, 0x84, 0x74, 0x6c, 0x7c, 0x13, 0x7b, 0x85, 0xef, 0xf6, 0xba, 0xbb, 0x57, 0x9a, 0x7c, 0x13,
    0xbe, 0x09, 0x6f, 0x78, 0xcf, 0x67, 0xe0, 0x45, 0x91, 0x10, 0x1f, 0x83, 0x17, 0x68, 0xff, 0x9c,
    0x7d, 0x6e, 0x48, 0x8a, 0xe0, 0x4d, 0x72, 0x33, 0xf3, 0xcc, 0xec, 0xec, 0x3c, 0xcf, 0x8e, 0x61,
    0x23, 0x13, 0x3c, 0xd5, 0x28, 0xe3, 0xe3, 0x61, 0x26, 0x85, 0x16, 0xa4, 0xbd, 0x70, 0x0c, 0xb6,
    0xa6, 0x42, 0x4c, 0xe7, 0xf8, 0xc0, 0x06, 0x8e, 0xf3, 0x93, 0x07, 0x9a, 0x27, 0xa8, 0x34, 0x4b,
    0x32, 0x87, 0x1d, 0xc0, 0x54, 0x4c, 0x45, 0xf1, 0x9d, 0x8a, 0x18, 0xfd, 0x77, 0x2f, 0xe3, 0x38,
    0x41, 0xa5, 0x85, 0x2c, 0x3c, 0x1d, 0x21, 0x63, 0x94, 0xca, 0x59, 0xe1, 0x8f, 0x55, 0xe8, 0x51,
    0x8c, 0xf3, 0x34, 0x66, 0xe9, 0xe4, 0xf4, 0x70, 0x32, 0xc3, 0x04, 0xc9, 0x27, 0x50, 0xd7, 0xa7,
    0x19, 0xf6, 0x2b, 0xdb, 0x95, 0x3b, 0xeb, 0xa3, 0xf7, 0x86, 0xcb, 0xc6, 0xde, 0x84, 0x0e, 0xdd,
    0xbf, 0xa3, 0xd3, 0x0c, 0xa9, 0xcd, 0x21, 0xd7, 0xa1, 0x95, 0xf0, 0x34, 0x92, 0xf8, 0xa2, 0x5f,
    0xdd, 0xae, 0xdc, 0x69, 0xd0, 0x66, 0xc2, 0x53, 0x8a, 0x2f, 0xc8, 0x35, 0x68, 0x68, 0xa1, 0xd9,
    0xbc, 0x5f, 0xb3, 0x6e, 0x67, 0x90, 0x0f, 0xa0, 0x27, 0x31, 0x63, 0x5c, 0x46, 0x7a, 0x26, 0x51,
    0xcd, 0xc4, 0x3c, 0xee, 0xd7, 0x2d, 0x60, 0xc3, 0xf9, 0x8f, 0x0a, 0x37, 0xb9, 0x07, 0x57, 0x55,
    0x3e, 0x99, 0xa0, 0x52, 0x25, 0x6c, 0xc3, 0x62, 0x7b, 0x3e, 0xb0, 0x04, 0xdf, 0x07, 0x82, 0x92,
    0xa9, 0x5c, 0x62, 0xa4, 0x66, 0xcc, 0xfc, 0xe5, 0x67, 0xd8, 0x6f, 0x3a, 0xb4, 0x8f, 0x1c, 0x9a,
    0xc0, 0x21, 0x3f, 0xc3, 0xf0, 0x1a, 0xc0, 0xf2, 0x22, 0xa4, 0x09, 0x55, 0x7a, 0xd8, 0xbb, 0x12,
    0x9e, 0x41, 0x40, 0x31, 0x11, 0x1a, 0x0f, 0xcc, 0x0c, 0xc9, 0x0d, 0x68, 0xdb, 0x61, 0x46, 0x69,
    0x9e, 0xd8, 0xd1, 0x34, 0xe8, 0x9a, 0x75, 0xec, 0xe7, 0x09, 0x79, 0x1f, 0x5a, 0x66, 0xea, 0x11,
    0x8f, 0xed, 0xb5, 0x3b, 0x3b, 0xeb, 0xbf, 0xbc, 0xde, 0xba, 0xf2, 0xeb, 0xeb, 0xad, 0xe6, 0xbe,
    0x88, 0x71, 0xbc, 0x4b, 0x9b, 0x26, 0x3c, 0x8e, 0xc9, 0x6d, 0xa8, 0xcf, 0x98, 0x9a, 0xd9, 0x29,
    0x04, 0xa3, 0xab, 0x43, 0xcf, 0x86, 0x3d, 0xe2, 0x2b, 0xa6, 0x66, 0xd4, 0x86, 0xc3, 0xdf, 0x2a,
    0xd0, 0x75, 0x87, 0x1f, 0xe2, 0x34, 0xc1, 0x54, 0x93, 0xc7, 0x00, 0x72, 0x31, 0x7d, 0x7b, 0x7e,
    0x30, 0xba, 0x71, 0x09, 0x35, 0xb4, 0x04, 0x27, 0x0f, 0xa1, 0x2b, 0x85, 0xd0, 0x91, 0xbb, 0xc0,
    0xa2, 0xc9, 0x0d, 0xdf, 0x64, 0xcb, 0x1e, 0x3f, 0xde, 0xa5, 0x81, 0x41, 0x39, 0x23, 0x26, 0x8f,
    0xa1, 0x2b, 0x6d, 0x0b, 0x2e, 0x4d, 0xf5, 0x6b, 0xdb, 0xb5, 0x3b, 0xc1, 0x68, 0x73, 0xe5, 0xd0,
    0xc5, 0x7c, 0x68, 0x47, 0x2e, 0x0d, 0x45, 0xb6, 0x20, 0x48, 0x50, 0x7e, 0x3f, 0xc7, 0xc8, 0x94,
    0xb4, 0x9c, 0x76, 0x28, 0x38, 0x17, 0x15, 0x42, 0x87, 0x7f, 0x56, 0xa1, 0x75, 0xe0, 0x0a, 0x91,
    0x07, 0x2b, 0x82, 0x2b, 0xdf, 0xca, 0x23, 0x86, 0xbb, 0x4c, 0xb3, 0x92, 0xca, 0x6e, 0xc3, 0x3a,
    0x4f, 0xe7, 0x3c, 0xc5, 0x48, 0xb9, 0xf1, 0xd8, 0x79, 0x76, 0x68, 0xd7, 0x79, 0x8b, 0x99, 0x7d,
    0x08, 0x4d, 0xd7, 0x94, 0x3d, 0x3f, 0x18, 0xf5, 0xcf, 0xb5, 0xee, 0x91, 0xd4, 0xe3, 0xc8, 0x2d,
    0xe8, 0xf8, 0x8a, 0x4e, 0x31, 0x46, 0x5f, 0x35, 0x1a, 0x78, 0x9f, 0x11, 0x0b, 0xf9, 0x1c, 0xba,
    0x13, 0x89, 0x4c, 0x73, 0x91, 0x46, 0x31, 0xd3, 0x4e, 0x55, 0xc1, 0x68, 0x30, 0x74, 0x6f, 0x74,
    0x58, 0xbc, 0xd1, 0xe1, 0x51, 0xf1, 0x46, 0x69, 0xa7, 0x48, 0xd8, 0x65, 0x1a, 0xc9, 0x17, 0xb0,
    0x81, 0xaf, 0x32, 0x2e, 0x4b, 0x25, 0x5a, 0x6f, 0x2d, 0xb1, 0xbe, 0x4c, 0xb1, 0x45, 0x06, 0xb0,
    0x96, 0xa0, 0x66, 0x31, 0xd3, 0xac, 0xbf, 0x66, 0xef, 0xbe, 0xb0, 0xc3, 0x10, 0xd6, 0x8a, 0x79,
    0x11, 0x80, 0xe6, 0x78, 0xff, 0xd9, 0x78, 0x7f, 0xaf, 0x77, 0xc5, 0x7c, 0xd3, 0xbd, 0xe7, 0x5f,
    0x1f, 0xed, 0xf5, 0x2a, 0xe1, 0x3e, 0xc0, 0x41, 0xae, 0x29, 0xbe, 0xc8, 0x51, 0x69, 0x42, 0xa0,
    0x9e, 0x31, 0x3d, 0xb3, 0x04, 0xb4, 0xa9, 0xfd, 0x26, 0xf7, 0xa1, 0xe5, 0xa7, 0x65, 0xd5, 0x12,
    0x8c, 0xc8, 0x79, 0x5e, 0x68, 0x01, 0x09, 0xb7, 0x01, 0x9e, 0xe2, 0x65, 0xf5, 0xc2, 0x9f, 0x2a,
    0x10, 0x3c, 0xe3, 0x6a, 0x81, 0xd9, 0x84, 0x66, 0x26, 0xf1, 0x84, 0xbf, 0xf2, 0x28, 0x6f, 0x19,
    0xe5, 0x28, 0xcd, 0xa4, 0x8e, 0xd8, 0x49, 0x71, 0x76, 0x9b, 0x82, 0x75, 0x3d, 0x31, 0x1e, 0xf2,
    0x0e, 0x00, 0xa6, 0x71, 0x74, 0x8c, 0x27, 0x42, 0xa2, 0x25, 0xbe, 0x4d, 0xdb, 0x98, 0xc6, 0x3b,
    0xd6, 0x41, 0x6e, 0x42, 0x5b, 0xe2, 0x24, 0x97, 0x8a, 0xbf, 0x74, 0xbc, 0xaf, 0xd1, 0xa5, 0xc3,
    0xac, 0xa1, 0x39, 0x4f, 0xb8, 0xf6, 0x9b, 0xc3, 0x19, 0xa6, 0xa4, 0x99, 0x5e, 0x74, 0x32, 0x67,
    0x53, 0x65, 0x09, 0x6d, 0xd1, 0xb6, 0xf1, 0x7c, 0x69, 0x1c, 0x61, 0x17, 0x02, 0x3b, 0x2c, 0x95,
    0x89, 0x54, 0x61, 0xf8, 0x7b, 0x05, 0x02, 0x7b, 0x59, 0x67, 0x97, 0x27, 0x55, 0x79, 0xeb, 0xa4,
    0xc8, 0x36, 0x34, 0xcc, 0x2e, 0x50, 0xfd, 0xaa, 0x7d, 0x4e, 0x30, 0xb4, 0xeb, 0xda, 0xac, 0x09,
    0xea, 0x02, 0xe4, 0x53, 0xa8, 0x65, 0xc7, 0xcc, 0xaf, 0x88, 0xbb, 0xc3, 0xe5, 0x0a, 0x97, 0x22,
    0xd7, 0xa8, 0x86, 0x07, 0xec, 0x14, 0xe5, 0x0e, 0x4b, 0xe3, 0x1f, 0x78, 0xac, 0x67, 0x4f, 0xe6,
    0x73, 0x31, 0xb1, 0xc2, 0xa0, 0x26, 0x8d, 0xec, 0x41, 0x97, 0xe5, 0x7a, 0x26, 0x24, 0x3f, 0xb3,
    0x5e, 0xaf, 0xfd, 0xad, 0xf3, 0x75, 0x0e, 0xf9, 0x34, 0xc5, 0xf8, 0x39, 0x2a, 0xc5, 0xa6, 0x48,
    0x57, 0xb3, 0xc2, 0x9f, 0x2b, 0xd0, 0x71, 0x74, 0xf9, 0x5b, 0x8e, 0xa0, 0xc1, 0x35, 0x26, 0xaa,
    0x5f, 0xb1, 0x7d, 0xdf, 0x2c, 0xdd, 0xb1, 0x8c, 0x1b, 0x8e, 0x35, 0x26, 0xd4, 0x41, 0x8d, 0x0e,
    0x12, 0x43, 0x52, 0xd5, 0xd2, 0x60, 0xbf, 0x07, 0x08, 0x75, 0x03, 0xf9, 0xef, 0x9a, 0x33, 0x1b,
    0x99, 0xab, 0xc8, 0x8b, 0xa8, 0x66, 0x8f, 0x58, 0xe3, 0xea, 0xc0, 0xda, 0xe1, 0xbb, 0xd0, 0xdd,
    0xc5, 0x39, 0x6a, 0xbc, 0x4c, 0x93, 0x3d, 0x58, 0x2f, 0x40, 0x9e, 0x5b, 0x09, 0xeb, 0x63, 0x8d,
    0x92, 0x2d, 0xf3, 0x2e, 0xd2, 0xe9, 0x35, 0x68, 0x9c, 0x70, 0xa9, 0xb4, 0x57, 0xa8, 0x33, 0x48,
    0x1f, 0x5a, 0x4e, 0x6c, 0xe8, 0x3b, 0x2a, 0x4c, 0x17, 0x79, 0x89, 0x26, 0x52, 0x2f, 0x22, 0xd6,
    0x0c, 0xbf, 0x85, 0xad, 0x0b, 0x29, 0xf5, 0x4d, 0x7c, 0x0c, 0x4d, 0x36, 0xb1, 0x6c, 0xba, 0x1d,
    0x79, 0xeb, 0x3c, 0x9b, 0xcb, 0x6c, 0x0b, 0xa4, 0x3e, 0x21, 0xfc, 0x0e, 0xb6, 0x2f, 0xae, 0xee,
    0xb9, 0xf5, 0x8a, 0xab, 0xfc, 0x2b, 0xc5, 0x8d, 0xfe, 0xa8, 0x42, 0xdb, 0x93, 0xb3, 0xbb, 0x43,
    0x1e, 0x41, 0xed, 0x20, 0xd7, 0xe4, 0x7f, 0x65, 0xe6, 0x16, 0x9b, 0x66, 0xb0, 0xf9, 0xa6, 0xdb,
    0x77, 0xf0, 0x08, 0x6a, 0x4f, 0x71, 0x35, 0x6b, 0xb9, 0x4f, 0x56, 0xb2, 0xca, 0x2f, 0xef, 0x23,
    0xa8, 0x1b, 0xed, 0x91, 0xcd, 0x73, 0x62, 0x74, 0x79, 0xd7, 0x2f, 0x10, 0x29, 0xf9, 0x0c, 0x9a,
    0x8e, 0x78, 0x52, 0xfe, 0x4d, 0x58, 0x11, 0xcc, 0xe0, 0xff, 0x7f, 0x13, 0xf1, 0xe9, 0x0a, 0xfa,
    0x17, 0x8d, 0x84, 0xdc, 0x2d, 0xdf, 0xf0, 0x72, 0x5a, 0x07, 0xf7, 0xfe, 0x11, 0xd6, 0x1d, 0xba,
    0x53, 0xff, 0xa6, 0x9a, 0x1d, 0x1f, 0x37, 0xed, 0x8f, 0xc3, 0xc3, 0xbf, 0x02, 0x00, 0x00, 0xff,
    0xff, 0x02, 0xf8, 0xe5, 0x03, 0x2f, 0x0a, 0x00, 0x00,
}

@ -10,6 +10,7 @@ import "google/protobuf/timestamp.proto";
import "gogo.proto";
import "node.proto";
import "piecestore.proto";
import "orders.proto";

// PointerDB defines the interface for interacting with the network state persistence layer
service PointerDB {
@ -43,13 +44,12 @@ message RedundancyScheme {
message RemotePiece {
  int32 piece_num = 1;
  bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
  piecestoreroutes.SignedHash hash = 3;
  orders.PieceHash hash = 3;
}

message RemoteSegment {
  RedundancyScheme redundancy = 1;
  // TODO: may want to use customtype and fixed-length byte slice
  string piece_id = 2;
  bytes root_piece_id = 2 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
  repeated RemotePiece remote_pieces = 3;
  bytes merkle_root = 4; // root hash of the hashes of all of these pieces
}

@ -3,11 +3,9 @@

package pb

import (
    fmt "fmt"
    proto "github.com/gogo/protobuf/proto"
    math "math"
)
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -32,7 +30,7 @@ func (m *SegmentMeta) Reset() { *m = SegmentMeta{} }
func (m *SegmentMeta) String() string { return proto.CompactTextString(m) }
func (*SegmentMeta) ProtoMessage() {}
func (*SegmentMeta) Descriptor() ([]byte, []int) {
    return fileDescriptor_c6bbf8af0ec331d6, []int{0}
    return fileDescriptor_streams_bbbe703970d9d652, []int{0}
}
func (m *SegmentMeta) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_SegmentMeta.Unmarshal(m, b)
@ -40,8 +38,8 @@ func (m *SegmentMeta) XXX_Unmarshal(b []byte) error {
func (m *SegmentMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_SegmentMeta.Marshal(b, m, deterministic)
}
func (m *SegmentMeta) XXX_Merge(src proto.Message) {
    xxx_messageInfo_SegmentMeta.Merge(m, src)
func (dst *SegmentMeta) XXX_Merge(src proto.Message) {
    xxx_messageInfo_SegmentMeta.Merge(dst, src)
}
func (m *SegmentMeta) XXX_Size() int {
    return xxx_messageInfo_SegmentMeta.Size(m)
@ -80,7 +78,7 @@ func (m *StreamInfo) Reset() { *m = StreamInfo{} }
func (m *StreamInfo) String() string { return proto.CompactTextString(m) }
func (*StreamInfo) ProtoMessage() {}
func (*StreamInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_c6bbf8af0ec331d6, []int{1}
    return fileDescriptor_streams_bbbe703970d9d652, []int{1}
}
func (m *StreamInfo) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_StreamInfo.Unmarshal(m, b)
@ -88,8 +86,8 @@ func (m *StreamInfo) XXX_Unmarshal(b []byte) error {
func (m *StreamInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_StreamInfo.Marshal(b, m, deterministic)
}
func (m *StreamInfo) XXX_Merge(src proto.Message) {
    xxx_messageInfo_StreamInfo.Merge(m, src)
func (dst *StreamInfo) XXX_Merge(src proto.Message) {
    xxx_messageInfo_StreamInfo.Merge(dst, src)
}
func (m *StreamInfo) XXX_Size() int {
    return xxx_messageInfo_StreamInfo.Size(m)
@ -142,7 +140,7 @@ func (m *StreamMeta) Reset() { *m = StreamMeta{} }
func (m *StreamMeta) String() string { return proto.CompactTextString(m) }
func (*StreamMeta) ProtoMessage() {}
func (*StreamMeta) Descriptor() ([]byte, []int) {
    return fileDescriptor_c6bbf8af0ec331d6, []int{2}
    return fileDescriptor_streams_bbbe703970d9d652, []int{2}
}
func (m *StreamMeta) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_StreamMeta.Unmarshal(m, b)
@ -150,8 +148,8 @@ func (m *StreamMeta) XXX_Unmarshal(b []byte) error {
func (m *StreamMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_StreamMeta.Marshal(b, m, deterministic)
}
func (m *StreamMeta) XXX_Merge(src proto.Message) {
    xxx_messageInfo_StreamMeta.Merge(m, src)
func (dst *StreamMeta) XXX_Merge(src proto.Message) {
    xxx_messageInfo_StreamMeta.Merge(dst, src)
}
func (m *StreamMeta) XXX_Size() int {
    return xxx_messageInfo_StreamMeta.Size(m)
@ -196,9 +194,9 @@ func init() {
    proto.RegisterType((*StreamMeta)(nil), "streams.StreamMeta")
}

func init() { proto.RegisterFile("streams.proto", fileDescriptor_c6bbf8af0ec331d6) }
func init() { proto.RegisterFile("streams.proto", fileDescriptor_streams_bbbe703970d9d652) }

var fileDescriptor_c6bbf8af0ec331d6 = []byte{
var fileDescriptor_streams_bbbe703970d9d652 = []byte{
    // 304 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x51, 0xcb, 0x4e, 0xc3, 0x30,
    0x10, 0x54, 0x5f, 0x50, 0xb6, 0x29, 0x05, 0x03, 0x52, 0x04, 0x17, 0x14, 0x0e, 0x20, 0x84, 0x7a,
@ -13,3 +13,6 @@ type NodeID = storj.NodeID

// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code
type NodeIDList = storj.NodeIDList

// PieceID is an alias to storj.PieceID for use in generated protobuf code
type PieceID = storj.PieceID

@ -1,201 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package psclient

import (
    "bufio"
    "flag"
    "fmt"
    "io"
    "time"

    "github.com/zeebo/errs"
    "go.uber.org/zap"
    "golang.org/x/net/context"

    "storj.io/storj/internal/memory"
    "storj.io/storj/pkg/auth"
    "storj.io/storj/pkg/identity"
    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/ranger"
    "storj.io/storj/pkg/storj"
    "storj.io/storj/pkg/transport"
)

var (
    // ClientError is any error returned by the client
    ClientError = errs.Class("piecestore client error")

    // ErrHashDoesNotMatch indicates hash comparison failed
    ErrHashDoesNotMatch = ClientError.New("hash does not match")

    defaultBandwidthMsgSize = 32 * memory.KB
    maxBandwidthMsgSize     = 64 * memory.KB
)

func init() {
    flag.Var(&defaultBandwidthMsgSize,
        "piecestore.rpc.client.default-bandwidth-msg-size",
        "default bandwidth message size in bytes")
    flag.Var(&maxBandwidthMsgSize,
        "piecestore.rpc.client.max-bandwidth-msg-size",
        "max bandwidth message size in bytes")
}

// Client is an interface describing the functions for interacting with piecestore nodes
type Client interface {
    Meta(ctx context.Context, id PieceID) (*pb.PieceSummary, error)
    Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) (*pb.SignedHash, error)
    Get(ctx context.Context, id PieceID, size int64, ba *pb.OrderLimit) (ranger.Ranger, error)
    Delete(ctx context.Context, pieceID PieceID, satelliteID storj.NodeID) error
    io.Closer
}

// PieceStore -- Struct Info needed for protobuf api calls
type PieceStore struct {
    closeFunc        func() error              // function that closes the transport connection
    client           pb.PieceStoreRoutesClient // PieceStore for interacting with Storage Node
    selfID           *identity.FullIdentity    // This client's (an uplink) identity
    bandwidthMsgSize int                       // max bandwidth message size in bytes
    remoteID         storj.NodeID              // Storage node being connected to
}

// NewPSClient initializes a piecestore client
func NewPSClient(ctx context.Context, tc transport.Client, n *pb.Node, bandwidthMsgSize int) (Client, error) {
    n.Type.DPanicOnInvalid("new ps client")
    conn, err := tc.DialNode(ctx, n)
    if err != nil {
        return nil, err
    }

    if bandwidthMsgSize < 0 || bandwidthMsgSize > maxBandwidthMsgSize.Int() {
        return nil, ClientError.New("invalid Bandwidth Message Size: %v", bandwidthMsgSize)
    }

    if bandwidthMsgSize == 0 {
        bandwidthMsgSize = defaultBandwidthMsgSize.Int()
    }

    return &PieceStore{
        closeFunc:        conn.Close,
        client:           pb.NewPieceStoreRoutesClient(conn),
        bandwidthMsgSize: bandwidthMsgSize,
        selfID:           tc.Identity(),
        remoteID:         n.Id,
    }, nil
}

// NewCustomRoute creates new PieceStore with custom client interface
func NewCustomRoute(client pb.PieceStoreRoutesClient, target *pb.Node, bandwidthMsgSize int, selfID *identity.FullIdentity) (*PieceStore, error) {
    target.Type.DPanicOnInvalid("new custom route")
    if bandwidthMsgSize < 0 || bandwidthMsgSize > maxBandwidthMsgSize.Int() {
        return nil, ClientError.New("invalid Bandwidth Message Size: %v", bandwidthMsgSize)
    }

    if bandwidthMsgSize == 0 {
        bandwidthMsgSize = defaultBandwidthMsgSize.Int()
    }

    return &PieceStore{
        client:           client,
        bandwidthMsgSize: bandwidthMsgSize,
        selfID:           selfID,
        remoteID:         target.Id,
    }, nil
}

// Close closes the connection with piecestore
func (ps *PieceStore) Close() error {
    if ps.closeFunc == nil {
        return nil
    }

    return ps.closeFunc()
}

// Meta requests info about a piece by Id
func (ps *PieceStore) Meta(ctx context.Context, id PieceID) (*pb.PieceSummary, error) {
    return ps.client.Piece(ctx, &pb.PieceId{Id: id.String()})
}

// Put uploads a Piece to a piece store Server
func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, pba *pb.OrderLimit) (*pb.SignedHash, error) {
    stream, err := ps.client.Store(ctx)
    if err != nil {
        return nil, err
    }

    // Making a clone, otherwise there will be a data race
    // when another goroutine tries to write the cached size
    // of this instance at the same time.
    pbaClone := pba.Clone()

    rba := &pb.Order{
        PayerAllocation: pbaClone,
        StorageNodeId:   ps.remoteID,
    }

    msg := &pb.PieceStore{
        PieceData:           &pb.PieceStore_PieceData{Id: id.String(), ExpirationUnixSec: ttl.Unix()},
        BandwidthAllocation: rba,
    }
    if err = stream.Send(msg); err != nil {
        if _, closeErr := stream.CloseAndRecv(); closeErr != nil {
            zap.S().Errorf("error closing stream %s :: %v.Send() = %v", closeErr, stream, closeErr)
        }

        return nil, fmt.Errorf("%v.Send() = %v", stream, err)
    }

    writer := NewStreamWriter(stream, ps, rba)

    bufw := bufio.NewWriterSize(writer, 32*1024)

    _, err = io.Copy(bufw, data)
    if err != nil {
        return nil, err
    }

    err = bufw.Flush()
    if err != nil {
        return nil, err
    }

    err = writer.Close()
    if err != nil && err != io.EOF {
        return nil, ClientError.New("failure during closing writer: %v", err)
    }

    err = writer.Verify()
    if err != nil {
        return nil, ClientError.Wrap(err)
    }

    return writer.storagenodeHash, nil
}

// Get begins downloading a Piece from a piece store Server
func (ps *PieceStore) Get(ctx context.Context, id PieceID, size int64, ba *pb.OrderLimit) (ranger.Ranger, error) {
    stream, err := ps.client.Retrieve(ctx)
    if err != nil {
        return nil, err
    }

    return PieceRangerSize(ps, stream, id, size, ba), nil
}

// Delete a Piece from a piece store Server
func (ps *PieceStore) Delete(ctx context.Context, id PieceID, satelliteID storj.NodeID) error {
|
||||
reply, err := ps.client.Delete(ctx, &pb.PieceDelete{Id: id.String(), SatelliteId: satelliteID})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
zap.S().Debugf("Delete request route summary: %v", reply)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sign a message using the clients private key
|
||||
func (ps *PieceStore) sign(rba *pb.Order) (err error) {
|
||||
return auth.SignMessage(rba, *ps.selfID)
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package psclient
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha512"
|
||||
|
||||
"github.com/mr-tron/base58/base58"
|
||||
)
|
||||
|
||||
// PieceID is the unique identifier for pieces
|
||||
type PieceID string
|
||||
|
||||
// NewPieceID creates a PieceID
|
||||
func NewPieceID() PieceID {
|
||||
b := make([]byte, 32)
|
||||
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return PieceID(base58.Encode(b))
|
||||
}
|
||||
|
||||
// String representation of the PieceID
|
||||
func (id PieceID) String() string {
|
||||
return string(id)
|
||||
}
|
||||
|
||||
// IsValid checks if the current PieceID is valid
|
||||
func (id PieceID) IsValid() bool {
|
||||
return len(id) >= 20
|
||||
}
|
||||
|
||||
// Derive a new PieceID from the current PieceID and the given secret
|
||||
func (id PieceID) Derive(secret []byte) (derived PieceID, err error) {
|
||||
mac := hmac.New(sha512.New, secret)
|
||||
_, err = mac.Write([]byte(id))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
h := mac.Sum(nil)
|
||||
// Trim the hash if greater than 32 bytes
|
||||
if len(h) > 32 {
|
||||
h = h[:32]
|
||||
}
|
||||
return PieceID(base58.Encode(h)), nil
|
||||
}
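For orientation, a minimal sketch of how this (now removed) derivation was used: the same base PieceID yields a distinct HMAC-derived ID per storage node, so one node cannot map its piece to the matching pieces held elsewhere. The node-ID byte slices below are hypothetical placeholders; only NewPieceID and Derive come from the file above.

package main

import (
	"fmt"

	"storj.io/storj/pkg/piecestore/psclient"
)

func main() {
	base := psclient.NewPieceID()
	for _, nodeID := range [][]byte{[]byte("node-a"), []byte("node-b")} {
		// Derive uses HMAC-SHA512 keyed by the node ID, truncated to 32 bytes.
		derived, err := base.Derive(nodeID)
		if err != nil {
			panic(err)
		}
		fmt.Println(derived)
	}
}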
@ -1,46 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package psclient

import (
	"testing"

	"github.com/mr-tron/base58/base58"
	"github.com/stretchr/testify/assert"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
)

func TestNewPieceID(t *testing.T) {
	t.Run("should return an id string", func(t *testing.T) {
		assert := assert.New(t)
		id := NewPieceID()
		assert.Equal(id.IsValid(), true)
	})

	t.Run("should return a different string on each call", func(t *testing.T) {
		assert := assert.New(t)
		assert.NotEqual(NewPieceID(), NewPieceID())
	})
}

func TestDerivePieceID(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	pid := NewPieceID()
	fid, err := testidentity.NewTestIdentity(ctx)
	assert.NoError(t, err)
	did, err := pid.Derive(fid.ID.Bytes())
	assert.NoError(t, err)
	assert.NotEqual(t, pid, did)

	did2, err := pid.Derive(fid.ID.Bytes())
	assert.NoError(t, err)
	assert.Equal(t, did, did2)

	_, err = base58.Decode(did.String())
	assert.NoError(t, err)
}
@ -1,84 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package psclient

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/ranger"
)

// Error is the error class for pieceRanger
var Error = errs.Class("pieceRanger error")

type pieceRanger struct {
	c      *PieceStore
	id     PieceID
	size   int64
	stream pb.PieceStoreRoutes_RetrieveClient
	pba    *pb.OrderLimit
}

// PieceRanger returns a Ranger from a PieceID.
func PieceRanger(ctx context.Context, c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, pba *pb.OrderLimit) (ranger.Ranger, error) {
	piece, err := c.Meta(ctx, id)
	if err != nil {
		return nil, err
	}
	return &pieceRanger{c: c, id: id, size: piece.PieceSize, stream: stream, pba: pba}, nil
}

// PieceRangerSize creates a PieceRanger with known size.
// Use it if you know the piece size. This will save the extra request for
// retrieving the piece size from the piece storage.
func PieceRangerSize(c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, size int64, pba *pb.OrderLimit) ranger.Ranger {
	return &pieceRanger{c: c, id: id, size: size, stream: stream, pba: pba}
}
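A hedged sketch of choosing between the two constructors, assuming it lives in this package and that the client, stream, and order limit already exist; a negative knownSize stands in for "size unknown" here. PieceRanger pays one extra Meta round trip to learn the size, while PieceRangerSize skips it when the segment metadata already records the piece size.

func openPieceRanger(ctx context.Context, c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient,
	id PieceID, knownSize int64, pba *pb.OrderLimit) (ranger.Ranger, error) {
	if knownSize >= 0 {
		// Size already known (e.g. from segment metadata): no Meta request.
		return PieceRangerSize(c, stream, id, knownSize, pba), nil
	}
	// Size unknown: PieceRanger fetches it with one extra request.
	return PieceRanger(ctx, c, stream, id, pba)
}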

// Size implements Ranger.Size
func (r *pieceRanger) Size() int64 {
	return r.size
}

// Range implements Ranger.Range
func (r *pieceRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
	if offset < 0 {
		return nil, Error.New("negative offset")
	}
	if length < 0 {
		return nil, Error.New("negative length")
	}
	if offset+length > r.size {
		return nil, Error.New("range beyond end")
	}
	if length == 0 {
		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
	}

	// Making a copy, otherwise there will be a data race
	// when another goroutine tries to write the cached size
	// of this instance at the same time.
	pbaClone := r.pba.Clone()

	rba := &pb.Order{
		PayerAllocation: pbaClone,
		StorageNodeId:   r.c.remoteID,
	}

	// send piece data
	if err := r.stream.Send(&pb.PieceRetrieval{
		PieceData:           &pb.PieceRetrieval_PieceData{Id: r.id.String(), PieceSize: length, Offset: offset},
		BandwidthAllocation: rba,
	}); err != nil {
		return nil, err
	}

	return NewStreamReader(r.c, r.stream, rba, r.size), nil
}
@ -1,172 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package psclient

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/teststorj"
	"storj.io/storj/pkg/pb"
)

func TestPieceRanger(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	for i, tt := range []struct {
		data                 string
		size, offset, length int64
		substr               string
		errString            string
	}{
		{"", 0, 0, 0, "", ""},
		{"abcdef", 6, 0, 0, "", ""},
		{"abcdef", 6, 3, 0, "", ""},
		{"abcdef", 6, 0, 6, "abcdef", ""},
		{"abcdef", 6, 0, 5, "abcde", ""},
		{"abcdef", 6, 0, 4, "abcd", ""},
		{"abcdef", 6, 1, 4, "bcde", ""},
		{"abcdef", 6, 2, 4, "cdef", ""},
		{"abcdefg", 7, 1, 4, "bcde", ""},
		{"abcdef", 6, 0, 7, "abcdef", "pieceRanger error: range beyond end"},
		{"abcdef", 6, -1, 7, "abcde", "pieceRanger error: negative offset"},
		{"abcdef", 6, 0, -1, "abcde", "pieceRanger error: negative length"},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)

		ctx := testcontext.New(t)
		defer ctx.Cleanup()
		id, err := testidentity.NewTestIdentity(ctx)
		assert.NoError(t, err)

		route := pb.NewMockPieceStoreRoutesClient(ctrl)

		route.EXPECT().Piece(
			gomock.Any(), gomock.Any(), gomock.Any(),
		).Return(&pb.PieceSummary{PieceSize: int64(len(tt.data))}, nil)

		stream := pb.NewMockPieceStoreRoutes_RetrieveClient(ctrl)
		pid := NewPieceID()

		if tt.offset >= 0 && tt.length > 0 && tt.offset+tt.length <= tt.size {
			stream.EXPECT().Send(gomock.Any()).Return(nil)
			stream.EXPECT().Send(gomock.Any()).Return(nil).MinTimes(0).MaxTimes(1)
			stream.EXPECT().Recv().Return(
				&pb.PieceRetrievalStream{
					PieceSize: tt.length,
					Content:   []byte(tt.data)[tt.offset : tt.offset+tt.length],
				}, nil)
			stream.EXPECT().Recv().Return(&pb.PieceRetrievalStream{}, io.EOF)
		}

		target := &pb.Node{
			Address: &pb.NodeAddress{
				Address:   "",
				Transport: 0,
			},
			Id:   teststorj.NodeIDFromString("test-node-id-1234567"),
			Type: pb.NodeType_STORAGE,
		}
		target.Type.DPanicOnInvalid("pr test")
		c, err := NewCustomRoute(route, target, 32*1024, id)
		assert.NoError(t, err)
		rr, err := PieceRanger(ctx, c, stream, pid, &pb.OrderLimit{})
		if assert.NoError(t, err, errTag) {
			assert.Equal(t, tt.size, rr.Size(), errTag)
		}
		r, err := rr.Range(ctx, tt.offset, tt.length)
		if tt.errString != "" {
			assert.EqualError(t, err, tt.errString, errTag)
			continue
		}
		assert.NoError(t, err, errTag)
		data, err := ioutil.ReadAll(r)
		if assert.NoError(t, err, errTag) {
			assert.Equal(t, []byte(tt.substr), data, errTag)
		}
	}
}

func TestPieceRangerSize(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	id, err := testidentity.NewTestIdentity(ctx)
	assert.NoError(t, err)

	for i, tt := range []struct {
		data                 string
		size, offset, length int64
		substr               string
		errString            string
	}{
		{"", 0, 0, 0, "", ""},
		{"abcdef", 6, 0, 0, "", ""},
		{"abcdef", 6, 3, 0, "", ""},
		{"abcdef", 6, 0, 6, "abcdef", ""},
		{"abcdef", 6, 0, 5, "abcde", ""},
		{"abcdef", 6, 0, 4, "abcd", ""},
		{"abcdef", 6, 1, 4, "bcde", ""},
		{"abcdef", 6, 2, 4, "cdef", ""},
		{"abcdefg", 7, 1, 4, "bcde", ""},
		{"abcdef", 6, 0, 7, "abcdef", "pieceRanger error: range beyond end"},
		{"abcdef", 6, -1, 7, "abcde", "pieceRanger error: negative offset"},
		{"abcdef", 6, 0, -1, "abcde", "pieceRanger error: negative length"},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)

		route := pb.NewMockPieceStoreRoutesClient(ctrl)
		pid := NewPieceID()

		stream := pb.NewMockPieceStoreRoutes_RetrieveClient(ctrl)

		if tt.offset >= 0 && tt.length > 0 && tt.offset+tt.length <= tt.size {
			stream.EXPECT().Send(gomock.Any()).Return(nil)
			stream.EXPECT().Send(gomock.Any()).Return(nil).MinTimes(0).MaxTimes(1)
			stream.EXPECT().Recv().Return(
				&pb.PieceRetrievalStream{
					PieceSize: tt.length,
					Content:   []byte(tt.data)[tt.offset : tt.offset+tt.length],
				}, nil)
			stream.EXPECT().Recv().Return(&pb.PieceRetrievalStream{}, io.EOF)
		}

		ctx := context.Background()

		target := &pb.Node{
			Address: &pb.NodeAddress{
				Address:   "",
				Transport: 0,
			},
			Id:   teststorj.NodeIDFromString("test-node-id-1234567"),
			Type: pb.NodeType_STORAGE,
		}
		target.Type.DPanicOnInvalid("pr test 2")
		c, err := NewCustomRoute(route, target, 32*1024, id)
		assert.NoError(t, err)
		rr := PieceRangerSize(c, stream, pid, tt.size, &pb.OrderLimit{})
		assert.Equal(t, tt.size, rr.Size(), errTag)
		r, err := rr.Range(ctx, tt.offset, tt.length)
		if tt.errString != "" {
			assert.EqualError(t, err, tt.errString, errTag)
			continue
		}
		assert.NoError(t, err, errTag)
		data, err := ioutil.ReadAll(r)
		if assert.NoError(t, err, errTag) {
			assert.Equal(t, []byte(tt.substr), data, errTag)
		}
	}
}
@ -1,189 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package psclient

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"hash"

	"go.uber.org/zap"

	"storj.io/storj/internal/sync2"
	"storj.io/storj/pkg/auth"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/utils"
)

// StreamWriter handles uplink or satellite writing data to the piece store server
type StreamWriter struct {
	stream          pb.PieceStoreRoutes_StoreClient
	signer          *PieceStore // We need this for signing
	totalWritten    int64
	rba             *pb.Order
	hash            hash.Hash
	storagenodeHash *pb.SignedHash
}

// NewStreamWriter creates a StreamWriter for writing data to the piece store server
func NewStreamWriter(stream pb.PieceStoreRoutes_StoreClient, signer *PieceStore, rba *pb.Order) *StreamWriter {
	return &StreamWriter{
		stream: stream,
		signer: signer,
		rba:    rba,
		hash:   sha256.New(),
	}
}

// Write Piece data to a piece store server upload stream
func (s *StreamWriter) Write(b []byte) (int, error) {
	updatedAllocation := s.totalWritten + int64(len(b))

	s.rba.Total = updatedAllocation
	err := auth.SignMessage(s.rba, *s.signer.selfID)
	if err != nil {
		return 0, err
	}

	msg := &pb.PieceStore{
		PieceData:           &pb.PieceStore_PieceData{Content: b},
		BandwidthAllocation: s.rba,
	}
	s.totalWritten = updatedAllocation
	// Second we send the actual content
	if err := s.stream.Send(msg); err != nil {
		return 0, fmt.Errorf("%v.Send() = %v", s.stream, err)
	}

	_, err = s.hash.Write(b)
	if err != nil {
		return 0, err
	}
	return len(b), nil
}

// Close the piece store Write Stream
func (s *StreamWriter) Close() error {
	reply, err := s.stream.CloseAndRecv()
	if err != nil {
		return err
	}

	s.storagenodeHash = reply.SignedHash

	zap.S().Debugf("Stream close and recv summary: %s, %d", reply.Message, reply.TotalReceived)

	return nil
}

// Verify storage node signed hash
func (s *StreamWriter) Verify() error {
	if err := auth.VerifyMsg(s.storagenodeHash, s.signer.remoteID); err != nil {
		return ClientError.Wrap(err)
	}

	clientHash := s.hash.Sum(nil)
	if !bytes.Equal(s.storagenodeHash.Hash, clientHash) {
		return ErrHashDoesNotMatch
	}

	return nil
}

// StreamReader is a struct for reading piece download stream from server
type StreamReader struct {
	pendingAllocs *sync2.Throttle
	client        *PieceStore
	stream        pb.PieceStoreRoutes_RetrieveClient
	src           *utils.ReaderSource
	downloaded    int64
	allocated     int64
	size          int64
}

// NewStreamReader creates a StreamReader for reading data from the piece store server
func NewStreamReader(client *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, rba *pb.Order, size int64) *StreamReader {
	sr := &StreamReader{
		pendingAllocs: sync2.NewThrottle(),
		client:        client,
		stream:        stream,
		size:          size,
	}
	// TODO: make these flag/config-file configurable
	trustLimit := int64(client.bandwidthMsgSize * 64)
	sendThreshold := int64(client.bandwidthMsgSize * 8)

	// Send signed allocations to the piece store server
	go func() {
		// TODO: make this flag/config-file configurable
		trustedSize := int64(client.bandwidthMsgSize * 8)

		// Allocate until we've reached the file size
		for sr.allocated < size {
			allocate := trustedSize
			if sr.allocated+trustedSize > size {
				allocate = size - sr.allocated
			}

			rba.Total = sr.allocated + allocate

			err := auth.SignMessage(rba, *client.selfID)
			if err != nil {
				sr.pendingAllocs.Fail(err)
			}
			msg := &pb.PieceRetrieval{BandwidthAllocation: rba}

			if err = stream.Send(msg); err != nil {
				sr.pendingAllocs.Fail(err)
				return
			}

			sr.allocated += trustedSize

			if err = sr.pendingAllocs.ProduceAndWaitUntilBelow(allocate, sendThreshold); err != nil {
				return
			}

			// Speed up retrieval as the server gives us more data
			trustedSize *= 2
			if trustedSize > trustLimit {
				trustedSize = trustLimit
			}
		}
	}()

	sr.src = utils.NewReaderSource(func() ([]byte, error) {
		resp, err := stream.Recv()
		if err != nil {
			sr.pendingAllocs.Fail(err)
			return nil, err
		}

		sr.downloaded += int64(len(resp.GetContent()))

		err = sr.pendingAllocs.Consume(int64(len(resp.GetContent())))
		if err != nil {
			sr.pendingAllocs.Fail(err)
			return resp.GetContent(), err
		}

		return resp.GetContent(), nil
	})

	return sr
}
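The goroutine above amounts to an exponential trust schedule: start by allocating eight bandwidth messages' worth of bytes, double the window after each round the server keeps delivering, and cap the window at sixty-four messages' worth. A standalone sketch of just that schedule, assuming only the two inputs and returning the sequence of allocation sizes:

func allocationSchedule(bandwidthMsgSize int, size int64) []int64 {
	trustedSize := int64(bandwidthMsgSize * 8)  // initial trusted window
	trustLimit := int64(bandwidthMsgSize * 64) // cap on trust growth

	var allocated int64
	var steps []int64
	for allocated < size {
		allocate := trustedSize
		if allocated+allocate > size {
			allocate = size - allocated // final partial allocation
		}
		steps = append(steps, allocate)
		allocated += allocate

		// Double the trusted window after each round, up to the cap.
		trustedSize *= 2
		if trustedSize > trustLimit {
			trustedSize = trustLimit
		}
	}
	return steps
}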

// Read Piece data from piece store server download stream
func (s *StreamReader) Read(b []byte) (int, error) {
	return s.src.Read(b)
}

// Close the piece store server Read Stream
func (s *StreamReader) Close() error {
	return utils.CombineErrors(
		s.stream.CloseSend(),
		s.client.Close(),
	)
}
@ -79,7 +79,7 @@ func (s *Inspector) getDashboardData(ctx context.Context) (*pb.DashboardResponse
	}

	return &pb.DashboardResponse{
		NodeId: s.ps.kad.Local().Id.String(),
		NodeId: s.ps.kad.Local().Id,
		NodeConnections:  int64(len(nodes)),
		BootstrapAddress: strings.Join(bsNodes[:], ", "),
		InternalAddress:  "",
@ -142,6 +142,9 @@ func NewEndpoint(log *zap.Logger, config Config, storage Storage, db *psdb.DB, i
	if config.SatelliteIDRestriction {
		idStrings := strings.Split(config.WhitelistedSatelliteIDs, ",")
		for _, s := range idStrings {
			if s == "" {
				continue
			}
			satID, err := storj.NodeIDFromString(s)
			if err != nil {
				return nil, err
@ -1,81 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package pstore

import (
	"bytes"
	"io"
	"io/ioutil"
	"math/rand"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/testcontext"
)

func TestStore(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	store := NewStorage(ctx.Dir("example"))
	defer ctx.Check(store.Close)

	pieceID := strings.Repeat("AB01", 10)

	source := make([]byte, 8000)
	_, _ = rand.Read(source[:])

	{ // write data
		w, err := store.Writer(pieceID)
		require.NoError(t, err)

		n, err := io.Copy(w, bytes.NewReader(source))
		assert.Equal(t, n, int64(len(source)))
		assert.NoError(t, err)

		assert.NoError(t, w.Close())
	}

	{ // valid reads
		read := func(offset, length int64) []byte {
			reader, err := store.Reader(ctx, pieceID, offset, length)
			if assert.NoError(t, err) {
				data, err := ioutil.ReadAll(reader)
				assert.NoError(t, err)
				assert.NoError(t, reader.Close())
				return data
			}
			return nil
		}

		assert.Equal(t, source, read(0, -1))
		assert.Equal(t, source, read(0, 16000))

		assert.Equal(t, source[10:1010], read(10, 1000))
		assert.Equal(t, source[10:11], read(10, 1))
	}

	{ // invalid reads
		badread := func(offset, length int64) error {
			reader, err := store.Reader(ctx, pieceID, offset, length)
			if err == nil {
				assert.NoError(t, reader.Close())
			}
			return err
		}

		assert.Error(t, badread(-100, 0))
		assert.Error(t, badread(-100, -10))
	}

	{ // test delete
		assert.NoError(t, store.Delete(pieceID))

		_, err := store.Reader(ctx, pieceID, 0, -1)
		assert.Error(t, err)
	}
}
@ -5,8 +5,12 @@ package pkcrypto

import (
	"crypto/sha256"
	"hash"
)

// NewHash returns the default hash in storj.
func NewHash() hash.Hash { return sha256.New() }

// SHA256Hash calculates the SHA256 hash of the input data
func SHA256Hash(data []byte) []byte {
	sum := sha256.Sum256(data)
@ -8,6 +8,9 @@ import (
	"errors"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/pkg/auth"
@ -69,6 +72,68 @@ func (allocation *AllocationSigner) PayerBandwidthAllocation(ctx context.Context
	return pba, err
}

// OrderLimitParameters holds the parameters necessary to create an OrderLimit
type OrderLimitParameters struct {
	UplinkIdentity  *identity.PeerIdentity
	StorageNodeID   storj.NodeID
	PieceID         storj.PieceID
	Action          pb.PieceAction
	Limit           int64
	PieceExpiration *timestamp.Timestamp
}

// OrderLimit returns a generated order limit
func (allocation *AllocationSigner) OrderLimit(ctx context.Context, parameters OrderLimitParameters) (pba *pb.OrderLimit2, err error) {
	if parameters.UplinkIdentity == nil {
		return nil, Error.New("missing uplink identity")
	}
	serialNum, err := uuid.New()
	if err != nil {
		return nil, err
	}

	// store the corresponding uplink's id and public key into certDB db
	err = allocation.certdb.SavePublicKey(ctx, parameters.UplinkIdentity.ID, parameters.UplinkIdentity.Leaf.PublicKey)
	if err != nil {
		return nil, err
	}

	if err := allocation.restrictActionsOrderLimit(parameters.UplinkIdentity.ID, parameters.Action); err != nil {
		return nil, err
	}

	// convert bwExpiration from days to seconds
	orderExpiration, err := ptypes.TimestampProto(time.Unix(int64(allocation.bwExpiration*86400), 0))
	if err != nil {
		return nil, err
	}

	pba = &pb.OrderLimit2{
		SerialNumber:    []byte(serialNum.String()),
		SatelliteId:     allocation.satelliteIdentity.ID,
		UplinkId:        parameters.UplinkIdentity.ID,
		StorageNodeId:   parameters.StorageNodeID,
		PieceId:         parameters.PieceID,
		Action:          parameters.Action,
		Limit:           parameters.Limit,
		PieceExpiration: parameters.PieceExpiration,
		OrderExpiration: orderExpiration,
	}

	// TODO: review whether this approach makes sense
	msgBytes, err := proto.Marshal(pba)
	if err != nil {
		return nil, auth.ErrMarshal.Wrap(err)
	}
	signature, err := auth.GenerateSignature(msgBytes, allocation.satelliteIdentity)
	if err != nil {
		return nil, auth.ErrMarshal.Wrap(err)
	}
	pba.SatelliteSignature = signature

	return pba, err
}
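A hedged usage sketch for the new OrderLimit method, assuming it sits in the same package and that a signer, uplink identity, and target IDs are already in hand; every argument value below is a placeholder.

func exampleOrderLimit(ctx context.Context, signer *AllocationSigner,
	uplink *identity.PeerIdentity, nodeID storj.NodeID, pieceID storj.PieceID) (*pb.OrderLimit2, error) {
	return signer.OrderLimit(ctx, OrderLimitParameters{
		UplinkIdentity:  uplink,
		StorageNodeID:   nodeID,
		PieceID:         pieceID,
		Action:          pb.PieceAction_PUT,
		Limit:           32 * 1024, // max bytes this limit authorizes
		PieceExpiration: ptypes.TimestampNow(),
	})
}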

func (allocation *AllocationSigner) restrictActions(peerID storj.NodeID, action pb.BandwidthAction) error {
	switch action {
	case pb.BandwidthAction_GET_REPAIR, pb.BandwidthAction_PUT_REPAIR, pb.BandwidthAction_GET_AUDIT:
@ -83,3 +148,18 @@ func (allocation *AllocationSigner) restrictActions(peerID storj.NodeID, action
		return errors.New("unknown action restriction")
	}
}

func (allocation *AllocationSigner) restrictActionsOrderLimit(peerID storj.NodeID, action pb.PieceAction) error {
	switch action {
	case pb.PieceAction_GET_REPAIR, pb.PieceAction_PUT_REPAIR, pb.PieceAction_GET_AUDIT:
		if peerID != allocation.satelliteIdentity.ID {
			return errors.New("action restricted to signing satellite")
		}

		return nil
	case pb.PieceAction_GET, pb.PieceAction_PUT, pb.PieceAction_DELETE:
		return nil
	default:
		return errors.New("unknown action restriction")
	}
}
@ -67,7 +67,7 @@ func makePointer(path storj.Path) pb.PutRequest {
			RepairThreshold:  2,
			SuccessThreshold: 3,
		},
		PieceId: "testId",
		RootPieceId: teststorj.PieceIDFromString("testId"),
		RemotePieces: rps,
	},
	SegmentSize: int64(1),
@ -105,16 +105,19 @@ func (s *Server) filterValidPieces(pointer *pb.Pointer) error {
	var remotePieces []*pb.RemotePiece
	remote := pointer.Remote
	for _, piece := range remote.RemotePieces {
		err := auth.VerifyMsg(piece.Hash, piece.NodeId)
		if err == nil {
			// set to nil after verification to avoid storing in DB
			piece.Hash.SetCerts(nil)
			piece.Hash.SetSignature(nil)
			remotePieces = append(remotePieces, piece)
		} else {
			// TODO satellite should send Delete request for piece that failed
			s.logger.Warn("unable to verify piece hash: %v", zap.Error(err))
		}
		// TODO enable verification

		// err := auth.VerifyMsg(piece.Hash, piece.NodeId)
		// if err == nil {
		// 	// set to nil after verification to avoid storing in DB
		// 	piece.Hash = nil
		// 	remotePieces = append(remotePieces, piece)
		// } else {
		// 	// TODO satellite should send Delete request for piece that failed
		// 	s.logger.Warn("unable to verify piece hash: %v", zap.Error(err))
		// }

		remotePieces = append(remotePieces, piece)
	}

	if int32(len(remotePieces)) < remote.Redundancy.SuccessThreshold {
@ -26,6 +26,7 @@ import (
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/auth"
	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/pointerdb"
	"storj.io/storj/pkg/storage/meta"
@ -108,12 +109,13 @@ func makePointer(ctx context.Context, t *testing.T, numOfValidPieces, numOfInval
		pieces[i] = &pb.RemotePiece{
			PieceNum: int32(i),
			NodeId:   identity.ID,
			Hash: &pb.SignedHash{Hash: make([]byte, 32)},
			Hash: &pb.PieceHash{Hash: make([]byte, 32)},
		}

		_, err = rand.Read(pieces[i].Hash.Hash)
		assert.NoError(t, err)
		err = auth.SignMessage(pieces[i].Hash, *identity)
		signer := signing.SignerFromFullIdentity(identity)
		pieces[i].Hash, err = signing.SignPieceHash(signer, pieces[i].Hash)
		assert.NoError(t, err)
	}

@ -124,12 +126,13 @@ func makePointer(ctx context.Context, t *testing.T, numOfValidPieces, numOfInval
		pieces[i] = &pb.RemotePiece{
			PieceNum: int32(i),
			NodeId:   storj.NodeID{byte(i)},
			Hash: &pb.SignedHash{Hash: make([]byte, 32)},
			Hash: &pb.PieceHash{Hash: make([]byte, 32)},
		}

		_, err = rand.Read(pieces[i].Hash.Hash)
		assert.NoError(t, err)
		err = auth.SignMessage(pieces[i].Hash, *identity)
		signer := signing.SignerFromFullIdentity(identity)
		pieces[i].Hash, err = signing.SignPieceHash(signer, pieces[i].Hash)
		assert.NoError(t, err)
	}

@ -15,58 +15,66 @@ import (
	"go.uber.org/zap"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/internal/sync2"
	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/ranger"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/pkg/utils"
	"storj.io/storj/uplink/piecestore"
)

var mon = monkit.Package()

// Client defines an interface for storing erasure coded data to piece store nodes
type Client interface {
	Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, successfulHashes []*pb.SignedHash, err error)
	Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme, pieceID psclient.PieceID, size int64, pba *pb.OrderLimit) (ranger.Ranger, error)
	Delete(ctx context.Context, nodes []*pb.Node, pieceID psclient.PieceID, satelliteID storj.NodeID) error
	Put(ctx context.Context, limits []*pb.AddressedOrderLimit, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error)
	Get(ctx context.Context, limits []*pb.AddressedOrderLimit, es eestream.ErasureScheme, size int64) (ranger.Ranger, error)
	Delete(ctx context.Context, limits []*pb.AddressedOrderLimit) error
}
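A hedged sketch of driving the refactored interface, assuming the caller has already fetched signed AddressedOrderLimits from a satellite; the 24-hour expiration is an arbitrary placeholder.

func exampleUpload(ctx context.Context, ec Client, limits []*pb.AddressedOrderLimit,
	rs eestream.RedundancyStrategy, data io.Reader) error {
	// Nodes are no longer passed separately: each limit carries the target
	// storage node's ID and address, so the limits slice fully describes
	// where the erasure-coded pieces go.
	nodes, hashes, err := ec.Put(ctx, limits, rs, data, time.Now().Add(24*time.Hour))
	if err != nil {
		return err
	}
	_ = nodes  // which nodes accepted pieces (nil entries were slow or failed)
	_ = hashes // signed piece hashes, kept for later auditing
	return nil
}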

type psClientFunc func(context.Context, transport.Client, *pb.Node, int) (psclient.Client, error)
type psClientHelper func(context.Context, *pb.Node) (psclient.Client, error)
type psClientHelper func(context.Context, *pb.Node) (*piecestore.Client, error)

type ecClient struct {
	transport       transport.Client
	memoryLimit     int
	newPSClientFunc psClientFunc
	transport   transport.Client
	memoryLimit int
}

// NewClient from the given identity and max buffer memory
func NewClient(tc transport.Client, memoryLimit int) Client {
	return &ecClient{
		transport:       tc,
		memoryLimit:     memoryLimit,
		newPSClientFunc: psclient.NewPSClient,
		transport:   tc,
		memoryLimit: memoryLimit,
	}
}

func (ec *ecClient) newPSClient(ctx context.Context, n *pb.Node) (psclient.Client, error) {
func (ec *ecClient) newPSClient(ctx context.Context, n *pb.Node) (*piecestore.Client, error) {
	n.Type.DPanicOnInvalid("new ps client")
	return ec.newPSClientFunc(ctx, ec.transport, n, 0)
	conn, err := ec.transport.DialNode(ctx, n)
	if err != nil {
		return nil, err
	}
	return piecestore.NewClient(
		zap.L().Named(n.Id.String()),
		signing.SignerFromFullIdentity(ec.transport.Identity()),
		conn,
		piecestore.DefaultConfig,
	), nil
}

func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, successfulHashes []*pb.SignedHash, err error) {
func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) {
	defer mon.Task()(&ctx)(&err)
	if len(nodes) != rs.TotalCount() {
		return nil, nil, Error.New("size of nodes slice (%d) does not match total count (%d) of erasure scheme", len(nodes), rs.TotalCount())
	if len(limits) != rs.TotalCount() {
		return nil, nil, Error.New("size of limits slice (%d) does not match total count (%d) of erasure scheme", len(limits), rs.TotalCount())
	}

	if nonNilCount(nodes) < rs.RepairThreshold() {
		return nil, nil, Error.New("number of non-nil nodes (%d) is less than repair threshold (%d) of erasure scheme", nonNilCount(nodes), rs.RepairThreshold())
	if nonNilCount(limits) < rs.RepairThreshold() {
		return nil, nil, Error.New("number of non-nil limits (%d) is less than repair threshold (%d) of erasure scheme", nonNilCount(limits), rs.RepairThreshold())
	}

	if !unique(nodes) {
	if !unique(limits) {
		return nil, nil, Error.New("duplicated nodes are not allowed")
	}

@ -79,56 +87,59 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
	type info struct {
		i    int
		err  error
		hash *pb.SignedHash
		hash *pb.PieceHash
	}
	infos := make(chan info, len(nodes))
	infos := make(chan info, len(limits))

	psCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	start := time.Now()

	for i, node := range nodes {
		if node != nil {
			node.Type.DPanicOnInvalid("ec client Put")
		}

		go func(i int, node *pb.Node) {
			hash, err := ec.putPiece(psCtx, ctx, node, pieceID, readers[i], expiration, pba)
	for i, addressedLimit := range limits {
		go func(i int, addressedLimit *pb.AddressedOrderLimit) {
			hash, err := ec.putPiece(psCtx, ctx, addressedLimit, readers[i], expiration)
			infos <- info{i: i, err: err, hash: hash}
		}(i, node)
		}(i, addressedLimit)
	}

	successfulNodes = make([]*pb.Node, len(nodes))
	successfulHashes = make([]*pb.SignedHash, len(nodes))
	successfulNodes = make([]*pb.Node, len(limits))
	successfulHashes = make([]*pb.PieceHash, len(limits))
	var successfulCount int32
	var timer *time.Timer

	for range nodes {
	for range limits {
		info := <-infos
		if info.err == nil {
			successfulNodes[info.i] = nodes[info.i]
			successfulHashes[info.i] = info.hash
		if info.err != nil {
			zap.S().Debugf("Upload to storage node %s failed: %v", limits[info.i].GetLimit().StorageNodeId, info.err)
			continue
		}

			switch int(atomic.AddInt32(&successfulCount, 1)) {
			case rs.RepairThreshold():
				elapsed := time.Since(start)
				more := elapsed * 3 / 2
		successfulNodes[info.i] = &pb.Node{
			Id:      limits[info.i].GetLimit().StorageNodeId,
			Address: limits[info.i].GetStorageNodeAddress(),
			Type:    pb.NodeType_STORAGE,
		}
		successfulHashes[info.i] = info.hash

				zap.S().Infof("Repair threshold (%d nodes) reached in %.2f s. Starting a timer for %.2f s for reaching the success threshold (%d nodes)...",
					rs.RepairThreshold(), elapsed.Seconds(), more.Seconds(), rs.OptimalThreshold())
		switch int(atomic.AddInt32(&successfulCount, 1)) {
		case rs.RepairThreshold():
			elapsed := time.Since(start)
			more := elapsed * 3 / 2

				timer = time.AfterFunc(more, func() {
					if ctx.Err() != context.Canceled {
						zap.S().Infof("Timer expired. Successfully uploaded to %d nodes. Canceling the long tail...", atomic.LoadInt32(&successfulCount))
						cancel()
					}
				})
			case rs.OptimalThreshold():
				zap.S().Infof("Success threshold (%d nodes) reached. Canceling the long tail...", rs.OptimalThreshold())
				timer.Stop()
				cancel()
			}
			zap.S().Infof("Repair threshold (%d nodes) reached in %.2f s. Starting a timer for %.2f s for reaching the success threshold (%d nodes)...",
				rs.RepairThreshold(), elapsed.Seconds(), more.Seconds(), rs.OptimalThreshold())

			timer = time.AfterFunc(more, func() {
				if ctx.Err() != context.Canceled {
					zap.S().Infof("Timer expired. Successfully uploaded to %d nodes. Canceling the long tail...", atomic.LoadInt32(&successfulCount))
					cancel()
				}
			})
		case rs.OptimalThreshold():
			zap.S().Infof("Success threshold (%d nodes) reached. Canceling the long tail...", rs.OptimalThreshold())
			timer.Stop()
			cancel()
		}
	}

@ -144,7 +155,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
	case <-ctx.Done():
		err = utils.CombineErrors(
			Error.New("upload cancelled by user"),
			ec.Delete(context.Background(), nodes, pieceID, pba.SatelliteId),
			// ec.Delete(context.Background(), nodes, pieceID, pba.SatelliteId), //TODO
		)
	default:
	}
@ -157,106 +168,88 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
	return successfulNodes, successfulHashes, nil
}

func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID psclient.PieceID, data io.ReadCloser, expiration time.Time, pba *pb.OrderLimit) (hash *pb.SignedHash, err error) {
func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error) {
	defer func() { err = errs.Combine(err, data.Close()) }()

	if node == nil {
	if limit == nil {
		_, err = io.Copy(ioutil.Discard, data)
		return nil, err
	}
	derivedPieceID, err := pieceID.Derive(node.Id.Bytes())

	storageNodeID := limit.GetLimit().StorageNodeId
	pieceID := limit.GetLimit().PieceId
	ps, err := ec.newPSClient(ctx, &pb.Node{
		Id:      storageNodeID,
		Address: limit.GetStorageNodeAddress(),
		Type:    pb.NodeType_STORAGE,
	})
	if err != nil {
		zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
		zap.S().Errorf("Failed dialing for putting piece %s to node %s: %v", pieceID, storageNodeID, err)
		return nil, err
	}
	ps, err := ec.newPSClient(ctx, node)
	if err != nil {
		zap.S().Errorf("Failed dialing for putting piece %s -> %s to node %s: %v",
			pieceID, derivedPieceID, node.Id, err)
		return nil, err
	}
	hash, err = ps.Put(ctx, derivedPieceID, data, expiration, pba)
	defer func() { err = errs.Combine(err, ps.Close()) }()

	upload, err := ps.Upload(ctx, limit.GetLimit())
	if err != nil {
		zap.S().Errorf("Failed requesting upload of piece %s to node %s: %v", pieceID, storageNodeID, err)
		return nil, err
	}
	defer func() {
		if ctx.Err() != nil || err != nil {
			hash = nil
			err = errs.Combine(err, upload.Cancel())
			return
		}
		h, closeErr := upload.Commit()
		hash = h
		err = errs.Combine(err, closeErr)
	}()

	_, err = sync2.Copy(ctx, upload, data)
	// Canceled context means the piece upload was interrupted by user or due
	// to slow connection. No error logging for this case.
	if ctx.Err() == context.Canceled {
		if parent.Err() == context.Canceled {
			zap.S().Infof("Upload to node %s canceled by user.", node.Id)
			zap.S().Infof("Upload to node %s canceled by user.", storageNodeID)
		} else {
			zap.S().Infof("Node %s cut from upload due to slow connection.", node.Id)
			zap.S().Infof("Node %s cut from upload due to slow connection.", storageNodeID)
		}
		err = context.Canceled
	} else if err != nil {
		nodeAddress := "nil"
		if node.Address != nil {
			nodeAddress = node.Address.Address
		if limit.GetStorageNodeAddress() != nil {
			nodeAddress = limit.GetStorageNodeAddress().GetAddress()
		}
		zap.S().Errorf("Failed putting piece %s -> %s to node %s (%+v): %v",
			pieceID, derivedPieceID, node.Id, nodeAddress, err)
		zap.S().Errorf("Failed uploading piece %s to node %s (%+v): %v", pieceID, storageNodeID, nodeAddress, err)
	}

	return hash, err
}

func (ec *ecClient) Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme,
	pieceID psclient.PieceID, size int64, pba *pb.OrderLimit) (rr ranger.Ranger, err error) {
func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, es eestream.ErasureScheme, size int64) (rr ranger.Ranger, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(nodes) != es.TotalCount() {
		return nil, Error.New("size of nodes slice (%d) does not match total count (%d) of erasure scheme", len(nodes), es.TotalCount())
	if len(limits) != es.TotalCount() {
		return nil, Error.New("size of limits slice (%d) does not match total count (%d) of erasure scheme", len(limits), es.TotalCount())
	}

	if nonNilCount(nodes) < es.RequiredCount() {
		return nil, Error.New("number of non-nil nodes (%d) is less than required count (%d) of erasure scheme", nonNilCount(nodes), es.RequiredCount())
	if nonNilCount(limits) < es.RequiredCount() {
		return nil, Error.New("number of non-nil limits (%d) is less than required count (%d) of erasure scheme", nonNilCount(limits), es.RequiredCount())
	}

	paddedSize := calcPadded(size, es.StripeSize())
	pieceSize := paddedSize / int64(es.RequiredCount())

	rrs := map[int]ranger.Ranger{}

	type rangerInfo struct {
		i   int
		rr  ranger.Ranger
		err error
	}
	ch := make(chan rangerInfo, len(nodes))

	for i, n := range nodes {

		if n != nil {
			n.Type.DPanicOnInvalid("ec client Get")
		}

		if n == nil {
			ch <- rangerInfo{i: i, rr: nil, err: nil}
	for i, addressedLimit := range limits {
		if addressedLimit == nil {
			continue
		}

		go func(i int, n *pb.Node) {
			derivedPieceID, err := pieceID.Derive(n.Id.Bytes())
			if err != nil {
				zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
				ch <- rangerInfo{i: i, rr: nil, err: err}
				return
			}

			rr := &lazyPieceRanger{
				newPSClientHelper: ec.newPSClient,
				node:              n,
				id:                derivedPieceID,
				size:              pieceSize,
				pba:               pba,
			}

			ch <- rangerInfo{i: i, rr: rr, err: nil}
		}(i, n)
	}

	for range nodes {
		rri := <-ch
		if rri.err == nil && rri.rr != nil {
			rrs[rri.i] = rri.rr
		rrs[i] = &lazyPieceRanger{
			newPSClientHelper: ec.newPSClient,
			limit:             addressedLimit,
			size:              pieceSize,
		}
	}

@ -268,54 +261,39 @@ func (ec *ecClient) Get(ctx context.Context, nodes []*pb.Node, es eestream.Erasu
	return eestream.Unpad(rr, int(paddedSize-size))
}

func (ec *ecClient) Delete(ctx context.Context, nodes []*pb.Node, pieceID psclient.PieceID, satelliteID storj.NodeID) (err error) {
func (ec *ecClient) Delete(ctx context.Context, limits []*pb.AddressedOrderLimit) (err error) {
	defer mon.Task()(&ctx)(&err)

	errch := make(chan error, len(nodes))
	for _, v := range nodes {
		if v != nil {
			v.Type.DPanicOnInvalid("ec client delete")
		}
	}
	for _, n := range nodes {
		if n == nil {
	errch := make(chan error, len(limits))
	for _, addressedLimit := range limits {
		if addressedLimit == nil {
			errch <- nil
			continue
		}

		go func(n *pb.Node) {
			derivedPieceID, err := pieceID.Derive(n.Id.Bytes())
		go func(addressedLimit *pb.AddressedOrderLimit) {
			limit := addressedLimit.GetLimit()
			ps, err := ec.newPSClient(ctx, &pb.Node{
				Id:      limit.StorageNodeId,
				Address: addressedLimit.GetStorageNodeAddress(),
				Type:    pb.NodeType_STORAGE,
			})
			if err != nil {
				zap.S().Errorf("Failed deriving piece id for %s: %v", pieceID, err)
				zap.S().Errorf("Failed dialing for deleting piece %s from node %s: %v", limit.PieceId, limit.StorageNodeId, err)
				errch <- err
				return
			}
			ps, err := ec.newPSClient(ctx, n)
			if err != nil {
				zap.S().Errorf("Failed dialing for deleting piece %s -> %s from node %s: %v",
					pieceID, derivedPieceID, n.Id, err)
				errch <- err
				return
			}
			err = ps.Delete(ctx, derivedPieceID, satelliteID)
			// normally the call below should be deferred, but doing so
			// randomly fails the unit tests
			err = ps.Delete(ctx, limit)
			err = errs.Combine(err, ps.Close())
			if err != nil {
				zap.S().Errorf("Failed deleting piece %s -> %s from node %s: %v",
					pieceID, derivedPieceID, n.Id, err)
				zap.S().Errorf("Failed deleting piece %s from node %s: %v", limit.PieceId, limit.StorageNodeId, err)
			}
			errch <- err
		}(n)
		}(addressedLimit)
	}

	allerrs := collectErrors(errch, len(nodes))
	for _, v := range nodes {
		if v != nil {
			v.Type.DPanicOnInvalid("ec client delete 2")
		}
	}
	if len(allerrs) > 0 && len(allerrs) == len(nodes) {
	allerrs := collectErrors(errch, len(limits))
	if len(allerrs) > 0 && len(allerrs) == len(limits) {
		return allerrs[0]
	}

@ -333,17 +311,15 @@ func collectErrors(errs <-chan error, size int) []error {
	return result
}

func unique(nodes []*pb.Node) bool {
	if len(nodes) < 2 {
func unique(limits []*pb.AddressedOrderLimit) bool {
	if len(limits) < 2 {
		return true
	}
	ids := make(storj.NodeIDList, len(nodes))
	for i, n := range nodes {
		if n != nil {
			ids[i] = n.Id
			n.Type.DPanicOnInvalid("ec client unique")
	ids := make(storj.NodeIDList, len(limits))
	for i, addressedLimit := range limits {
		if addressedLimit != nil {
			ids[i] = addressedLimit.GetLimit().StorageNodeId
		}
	}

	// sort the ids and check for identical neighbors
@ -367,12 +343,9 @@ func calcPadded(size int64, blockSize int) int64 {
}

type lazyPieceRanger struct {
	ranger            ranger.Ranger
	newPSClientHelper psClientHelper
	node              *pb.Node
	id                psclient.PieceID
	limit             *pb.AddressedOrderLimit
	size              int64
	pba               *pb.OrderLimit
}

// Size implements Ranger.Size
@ -382,27 +355,22 @@ func (lr *lazyPieceRanger) Size() int64 {

// Range implements Ranger.Range to be lazily connected
func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
	lr.node.Type.DPanicOnInvalid("Range")
	if lr.ranger == nil {
		ps, err := lr.newPSClientHelper(ctx, lr.node)
		if err != nil {
			return nil, err
		}
		ranger, err := ps.Get(ctx, lr.id, lr.size, lr.pba)
		if err != nil {
			return nil, err
		}
		lr.ranger = ranger
	ps, err := lr.newPSClientHelper(ctx, &pb.Node{
		Id:      lr.limit.GetLimit().StorageNodeId,
		Address: lr.limit.GetStorageNodeAddress(),
		Type:    pb.NodeType_STORAGE,
	})
	if err != nil {
		return nil, err
	}
	return lr.ranger.Range(ctx, offset, length)
	return ps.Download(ctx, lr.limit.GetLimit(), offset, length)
}
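A minimal read sketch against the resulting ranger, assuming a ranger.Ranger rr obtained from Get; the offset and length below are placeholders. Each Range call now dials the node and streams only the requested window instead of caching a connection.

func readWindow(ctx context.Context, rr ranger.Ranger) ([]byte, error) {
	r, err := rr.Range(ctx, 4096, 1024) // read 1 KiB starting at byte 4096
	if err != nil {
		return nil, err
	}
	defer func() { _ = r.Close() }()
	return ioutil.ReadAll(r)
}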
|
||||
|
||||
func nonNilCount(nodes []*pb.Node) int {
|
||||
func nonNilCount(limits []*pb.AddressedOrderLimit) int {
|
||||
total := 0
|
||||
for _, node := range nodes {
|
||||
if node != nil {
|
||||
for _, limit := range limits {
|
||||
if limit != nil {
|
||||
total++
|
||||
node.Type.DPanicOnInvalid("nonNilCount")
|
||||
}
|
||||
}
|
||||
return total
|
||||
|
178
pkg/storage/ec/client_planet_test.go
Normal file
178
pkg/storage/ec/client_planet_test.go
Normal file
@ -0,0 +1,178 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package ecclient_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/skyrings/skyring-common/tools/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/vivint/infectious"
|
||||
|
||||
"storj.io/storj/internal/memory"
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testplanet"
|
||||
"storj.io/storj/pkg/auth/signing"
|
||||
"storj.io/storj/pkg/eestream"
|
||||
"storj.io/storj/pkg/pb"
|
||||
ecclient "storj.io/storj/pkg/storage/ec"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/satellite"
|
||||
"storj.io/storj/storagenode"
|
||||
)
|
||||
|
||||
const (
|
||||
dataSize = 32 * memory.KiB
|
||||
storageNodes = 4
|
||||
)
|
||||
|
||||
func TestECClient(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
planet, err := testplanet.New(t, 1, storageNodes, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer ctx.Check(planet.Shutdown)
|
||||
|
||||
planet.Start(ctx)
|
||||
|
||||
ec := ecclient.NewClient(planet.Uplinks[0].Transport, 0)
|
||||
|
||||
k := storageNodes / 2
|
||||
n := storageNodes
|
||||
fc, err := infectious.NewFEC(k, n)
|
||||
require.NoError(t, err)
|
||||
|
||||
es := eestream.NewRSScheme(fc, dataSize.Int()/n)
|
||||
rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := ioutil.ReadAll(io.LimitReader(rand.Reader, dataSize.Int64()))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Erasure encode some random data and upload the pieces
|
||||
successfulNodes, successfulHashes := testPut(ctx, t, planet, ec, rs, data)
|
||||
|
||||
// Download the pieces and erasure decode the data
|
||||
testGet(ctx, t, planet, ec, es, data, successfulNodes, successfulHashes)
|
||||
|
||||
// Delete the pieces
|
||||
testDelete(ctx, t, planet, ec, successfulNodes, successfulHashes)
|
||||
}
|
||||
|
||||
func testPut(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, rs eestream.RedundancyStrategy, data []byte) ([]*pb.Node, []*pb.PieceHash) {
|
||||
var err error
|
||||
	limits := make([]*pb.AddressedOrderLimit, rs.TotalCount())
	for i := 0; i < len(limits); i++ {
		limits[i], err = newAddressedOrderLimit(pb.PieceAction_PUT, planet.Satellites[0], planet.Uplinks[0], planet.StorageNodes[i], storj.NewPieceID())
		require.NoError(t, err)
	}

	ttl := time.Now()

	r := bytes.NewReader(data)

	successfulNodes, successfulHashes, err := ec.Put(ctx, limits, rs, r, ttl)

	require.NoError(t, err)
	assert.Equal(t, len(limits), len(successfulNodes))

	slowNodes := 0
	for i := range limits {
		if successfulNodes[i] == nil && limits[i] != nil {
			slowNodes++
		} else {
			assert.Equal(t, limits[i].GetLimit().StorageNodeId, successfulNodes[i].Id)
			if successfulNodes[i] != nil {
				assert.NotNil(t, successfulHashes[i])
				assert.Equal(t, limits[i].GetLimit().PieceId, successfulHashes[i].PieceId)
			}
		}
	}

	if slowNodes > rs.TotalCount()-rs.RequiredCount() {
		assert.Fail(t, fmt.Sprintf("Too many slow nodes: \n"+
			"expected: <= %d\n"+
			"actual : %d", rs.TotalCount()-rs.RequiredCount(), slowNodes))
	}

	return successfulNodes, successfulHashes
}

func testGet(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, es eestream.ErasureScheme, data []byte, successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash) {
	var err error
	limits := make([]*pb.AddressedOrderLimit, es.TotalCount())
	for i := 0; i < len(limits); i++ {
		if successfulNodes[i] != nil {
			limits[i], err = newAddressedOrderLimit(pb.PieceAction_GET, planet.Satellites[0], planet.Uplinks[0], planet.StorageNodes[i], successfulHashes[i].PieceId)
			require.NoError(t, err)
		}
	}

	rr, err := ec.Get(ctx, limits, es, dataSize.Int64())
	require.NoError(t, err)

	r, err := rr.Range(ctx, 0, rr.Size())
	require.NoError(t, err)
	readData, err := ioutil.ReadAll(r)
	require.NoError(t, err)
	assert.Equal(t, data, readData)
	assert.NoError(t, r.Close())
	require.NoError(t, err)
}

func testDelete(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash) {
	var err error
	limits := make([]*pb.AddressedOrderLimit, len(successfulNodes))
	for i := 0; i < len(limits); i++ {
		if successfulNodes[i] != nil {
			limits[i], err = newAddressedOrderLimit(pb.PieceAction_DELETE, planet.Satellites[0], planet.Uplinks[0], planet.StorageNodes[i], successfulHashes[i].PieceId)
			require.NoError(t, err)
		}
	}

	err = ec.Delete(ctx, limits)

	require.NoError(t, err)
}

func newAddressedOrderLimit(action pb.PieceAction, satellite *satellite.Peer, uplink *testplanet.Uplink, storageNode *storagenode.Peer, pieceID storj.PieceID) (*pb.AddressedOrderLimit, error) {
	// TODO refactor to avoid OrderLimit duplication
	serialNumber, err := uuid.New()
	if err != nil {
		return nil, err
	}

	limit := &pb.OrderLimit2{
		SerialNumber:    []byte(serialNumber.String()),
		SatelliteId:     satellite.ID(),
		UplinkId:        uplink.ID(),
		StorageNodeId:   storageNode.ID(),
		PieceId:         pieceID,
		Action:          action,
		Limit:           dataSize.Int64(),
		PieceExpiration: new(timestamp.Timestamp),
		OrderExpiration: new(timestamp.Timestamp),
	}

	limit, err = signing.SignOrderLimit(signing.SignerFromFullIdentity(satellite.Identity), limit)
	if err != nil {
		return nil, err
	}

	return &pb.AddressedOrderLimit{
		StorageNodeAddress: storageNode.Local().Address,
		Limit:              limit,
	}, nil
}
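
The helpers above exercise the new order-limit flow end to end. As a rough sketch of how a test could chain them (hypothetical driver; testPut is defined earlier in this file and its exact signature is assumed here):

	// Upload a random segment, read it back, then delete its pieces.
	data := make([]byte, dataSize.Int64())
	_, err := rand.Read(data) // crypto/rand
	require.NoError(t, err)

	successfulNodes, successfulHashes := testPut(ctx, t, planet, ec, rs, data)
	testGet(ctx, t, planet, ec, rs.ErasureScheme, data, successfulNodes, successfulHashes)
	testDelete(ctx, t, planet, ec, successfulNodes, successfulHashes)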
@ -4,373 +4,43 @@
package ecclient

import (
	"context"
	"crypto/rand"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/vivint/infectious"

	"storj.io/storj/internal/teststorj"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/peertls/tlsopts"
	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/ranger"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/transport"
)

const (
	dialFailed = "dial failed"
	opFailed   = "op failed"
)

var (
	ErrDialFailed = errors.New(dialFailed)
	ErrOpFailed   = errors.New(opFailed)
)

var (
	node0 = teststorj.MockNode("node-0")
	node1 = teststorj.MockNode("node-1")
	node2 = teststorj.MockNode("node-2")
	node3 = teststorj.MockNode("node-3")
)

func TestNewECClient(t *testing.T) {
	ident, err := identity.FullIdentityFromPEM([]byte(`-----BEGIN CERTIFICATE-----
MIIBPzCB56ADAgECAhBkctCIgrE25/vSSXpUno5SMAoGCCqGSM49BAMCMAAwIhgP
MDAwMTAxMDEwMDAwMDBaGA8wMDAxMDEwMTAwMDAwMFowADBZMBMGByqGSM49AgEG
CCqGSM49AwEHA0IABFaIq+DPJfvMv8RwFXIpGGxLOHCbsvG8iMyAarv04l8QptPP
nSEKiod+KGbhQ6pEJZ0eWEyDbkA9RsUG/axNX96jPzA9MA4GA1UdDwEB/wQEAwIF
oDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAK
BggqhkjOPQQDAgNHADBEAiAc+6+oquoS0zcYrLd4rmoZC6uoh4ItQvH5phP0MK3b
YAIgDznIZz/oeowiv+Ui6HZT7aclBvTGjrfHR7Uo7TeGFls=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBOjCB4KADAgECAhA7Yb8vONMfR8ri8DCmFP7hMAoGCCqGSM49BAMCMAAwIhgP
MDAwMTAxMDEwMDAwMDBaGA8wMDAxMDEwMTAwMDAwMFowADBZMBMGByqGSM49AgEG
CCqGSM49AwEHA0IABCqtWDMdx38NKcTW58up4SLn6d6f+E4jljovCp9YY4zVg2lk
/GyDAb5tuB/WttbZUO7VUMSdYjpSH5sad8uff3+jODA2MA4GA1UdDwEB/wQEAwIC
BDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49
BAMCA0kAMEYCIQDFCnJ5qV6KyN2AGD7exywI5ls7Jo3scBO8ekuXT2yNhQIhAK3W
qYzzqaR5oPuEeRSitAbV69mNcKznpU21jCnnuSq9
-----END CERTIFICATE-----
`), []byte(`-----BEGIN EC PRIVATE KEY-----
MHcCAQEEICvE+Bd39LJ3VVf/SBdkw/IPjyVmMWq8Sr7GuWzkfdpJoAoGCCqGSM49
AwEHoUQDQgAEVoir4M8l+8y/xHAVcikYbEs4cJuy8byIzIBqu/TiXxCm08+dIQqK
h34oZuFDqkQlnR5YTINuQD1GxQb9rE1f3g==
-----END EC PRIVATE KEY-----`))
	require.NoError(t, err)

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mbm := 1234

	clientOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{})
	require.NoError(t, err)

	clientTransport := transport.NewClient(clientOptions)

	ec := NewClient(clientTransport, mbm)
	assert.NotNil(t, ec)

	ecc, ok := ec.(*ecClient)
	assert.True(t, ok)
	assert.NotNil(t, ecc.transport)
	assert.Equal(t, mbm, ecc.memoryLimit)
}

func TestPut(t *testing.T) {
	ctx := context.Background()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	size := 32 * 1024
	k := 2
	n := 4
	fc, err := infectious.NewFEC(k, n)
	if !assert.NoError(t, err) {
		return
	}
	es := eestream.NewRSScheme(fc, size/n)

TestLoop:
	for i, tt := range []struct {
		nodes     []*pb.Node
		min       int
		badInput  bool
		errs      []error
		errString string
	}{
		{[]*pb.Node{}, 0, true, []error{},
			fmt.Sprintf("ecclient error: size of nodes slice (0) does not match total count (%v) of erasure scheme", n)},
		{[]*pb.Node{node0, node1, node0, node3}, 0, true,
			[]error{nil, nil, nil, nil},
			"ecclient error: duplicated nodes are not allowed"},
		{[]*pb.Node{node0, node1, node2, node3}, 0, false,
			[]error{nil, nil, nil, nil}, ""},
		{[]*pb.Node{node0, node1, node2, node3}, 0, false,
			[]error{nil, ErrDialFailed, nil, nil},
			"ecclient error: successful puts (3) less than repair threshold (4)"},
		{[]*pb.Node{node0, node1, node2, node3}, 0, false,
			[]error{nil, ErrOpFailed, nil, nil},
			"ecclient error: successful puts (3) less than repair threshold (4)"},
		{[]*pb.Node{node0, node1, node2, node3}, 2, false,
			[]error{nil, ErrDialFailed, nil, nil}, ""},
		{[]*pb.Node{node0, node1, node2, node3}, 2, false,
			[]error{ErrOpFailed, ErrDialFailed, nil, ErrDialFailed},
			"ecclient error: successful puts (1) less than repair threshold (2)"},
		{[]*pb.Node{nil, nil, node2, node3}, 2, false,
			[]error{nil, nil, nil, nil}, ""},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)

		id := psclient.NewPieceID()
		ttl := time.Now()

		errs := make(map[*pb.Node]error, len(tt.nodes))
		for i, n := range tt.nodes {
			errs[n] = tt.errs[i]
		}

		clients := make(map[*pb.Node]psclient.Client, len(tt.nodes))
		for _, n := range tt.nodes {
			if n == nil || tt.badInput {
				continue
			}
			n.Type.DPanicOnInvalid("ec client test 1")
			derivedID, err := id.Derive(n.Id.Bytes())
			if !assert.NoError(t, err, errTag) {
				continue TestLoop
			}
			ps := NewMockPSClient(ctrl)
			gomock.InOrder(
				ps.EXPECT().Put(gomock.Any(), derivedID, gomock.Any(), ttl, &pb.OrderLimit{}).Return(&pb.SignedHash{}, errs[n]).
					Do(func(ctx context.Context, id psclient.PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) {
						// simulate that the mocked piece store client is reading the data
						_, err := io.Copy(ioutil.Discard, data)
						assert.NoError(t, err, errTag)
					}),
				ps.EXPECT().Close().Return(nil),
			)
			clients[n] = ps
		}
		rs, err := eestream.NewRedundancyStrategy(es, tt.min, 0)
		if !assert.NoError(t, err, errTag) {
			continue
		}
		r := io.LimitReader(rand.Reader, int64(size))
		ec := ecClient{newPSClientFunc: mockNewPSClient(clients)}

		successfulNodes, successfulHashes, err := ec.Put(ctx, tt.nodes, rs, id, r, ttl, &pb.OrderLimit{})

		if tt.errString != "" {
			assert.EqualError(t, err, tt.errString, errTag)
			continue
		}

		assert.NoError(t, err, errTag)
		assert.Equal(t, len(tt.nodes), len(successfulNodes), errTag)

		slowNodes := 0
		for i := range tt.nodes {
			if tt.errs[i] != nil {
				assert.Nil(t, successfulNodes[i], errTag)
				assert.Nil(t, successfulHashes[i], errTag)
			} else if successfulNodes[i] == nil && tt.nodes[i] != nil {
				slowNodes++
			} else {
				assert.Equal(t, tt.nodes[i], successfulNodes[i], errTag)
				if successfulNodes[i] != nil {
					assert.NotNil(t, successfulHashes[i], errTag)
				}
			}
		}

		if slowNodes > n-k {
			assert.Fail(t, fmt.Sprintf("Too many slow nodes: \n"+
				"expected: <= %d\n"+
				"actual : %d", n-k, slowNodes), errTag)
		}
	}
}

func mockNewPSClient(clients map[*pb.Node]psclient.Client) psClientFunc {
	return func(_ context.Context, _ transport.Client, n *pb.Node, _ int) (psclient.Client, error) {
		n.Type.DPanicOnInvalid("mock new ps client")
		c, ok := clients[n]
		if !ok {
			return nil, ErrDialFailed
		}

		return c, nil
	}
}

func TestGet(t *testing.T) {
	ctx := context.Background()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	size := 32 * 1024
	k := 2
	n := 4
	fc, err := infectious.NewFEC(k, n)
	if !assert.NoError(t, err) {
		return
	}
	es := eestream.NewRSScheme(fc, size/n)

TestLoop:
	for i, tt := range []struct {
		nodes     []*pb.Node
		mbm       int
		errs      []error
		errString string
	}{
		{[]*pb.Node{}, 0, []error{}, "ecclient error: " +
			fmt.Sprintf("size of nodes slice (0) does not match total count (%v) of erasure scheme", n)},
		{[]*pb.Node{node0, node1, node2, node3}, -1,
			[]error{nil, nil, nil, nil},
			"eestream error: negative max buffer memory"},
		{[]*pb.Node{node0, node1, node2, node3}, 0,
			[]error{nil, nil, nil, nil}, ""},
		{[]*pb.Node{node0, node1, node2, node3}, 0,
			[]error{nil, ErrDialFailed, nil, nil}, ""},
		{[]*pb.Node{node0, node1, node2, node3}, 0,
			[]error{nil, ErrOpFailed, nil, nil}, ""},
		{[]*pb.Node{node0, node1, node2, node3}, 0,
			[]error{ErrOpFailed, ErrDialFailed, nil, ErrDialFailed}, ""},
		{[]*pb.Node{node0, node1, node2, node3}, 0,
			[]error{ErrDialFailed, ErrOpFailed, ErrOpFailed, ErrDialFailed}, ""},
		{[]*pb.Node{nil, nil, node2, node3}, 0,
			[]error{nil, nil, nil, nil}, ""},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)

		id := psclient.NewPieceID()

		errs := make(map[*pb.Node]error, len(tt.nodes))
		for i, n := range tt.nodes {
			errs[n] = tt.errs[i]
		}

		clients := make(map[*pb.Node]psclient.Client, len(tt.nodes))
		for _, n := range tt.nodes {
			if errs[n] == ErrOpFailed {
				derivedID, err := id.Derive(n.Id.Bytes())
				if !assert.NoError(t, err, errTag) {
					continue TestLoop
				}
				ps := NewMockPSClient(ctrl)
				ps.EXPECT().Get(gomock.Any(), derivedID, int64(size/k), gomock.Any()).Return(ranger.ByteRanger(nil), errs[n])
				clients[n] = ps
			}
		}
		ec := ecClient{newPSClientFunc: mockNewPSClient(clients), memoryLimit: tt.mbm}
		rr, err := ec.Get(ctx, tt.nodes, es, id, int64(size), nil)
		if err == nil {
			_, err := rr.Range(ctx, 0, 0)
			assert.NoError(t, err, errTag)
		}
		if tt.errString != "" {
			assert.EqualError(t, err, tt.errString, errTag)
		} else {
			assert.NoError(t, err, errTag)
			assert.NotNil(t, rr, errTag)
		}
	}
}

func TestDelete(t *testing.T) {
	ctx := context.Background()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

TestLoop:
	for i, tt := range []struct {
		nodes     []*pb.Node
		errs      []error
		errString string
	}{
		{[]*pb.Node{}, []error{}, ""},
		{[]*pb.Node{node0}, []error{nil}, ""},
		{[]*pb.Node{node0}, []error{ErrDialFailed}, dialFailed},
		{[]*pb.Node{node0}, []error{ErrOpFailed}, opFailed},
		{[]*pb.Node{node0, node1}, []error{nil, nil}, ""},
		{[]*pb.Node{node0, node1}, []error{ErrDialFailed, nil}, ""},
		{[]*pb.Node{node0, node1}, []error{nil, ErrOpFailed}, ""},
		{[]*pb.Node{node0, node1}, []error{ErrDialFailed, ErrDialFailed}, dialFailed},
		{[]*pb.Node{node0, node1}, []error{ErrOpFailed, ErrOpFailed}, opFailed},
		{[]*pb.Node{nil, node1}, []error{nil, nil}, ""},
		{[]*pb.Node{nil, nil}, []error{nil, nil}, ""},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)

		id := psclient.NewPieceID()

		errs := make(map[*pb.Node]error, len(tt.nodes))
		for i, n := range tt.nodes {
			errs[n] = tt.errs[i]
		}

		clients := make(map[*pb.Node]psclient.Client, len(tt.nodes))
		for _, n := range tt.nodes {
			if n != nil && errs[n] != ErrDialFailed {
				derivedID, err := id.Derive(n.Id.Bytes())
				if !assert.NoError(t, err, errTag) {
					continue TestLoop
				}
				ps := NewMockPSClient(ctrl)
				gomock.InOrder(
					ps.EXPECT().Delete(gomock.Any(), derivedID, gomock.Any()).Return(errs[n]),
					ps.EXPECT().Close().Return(nil),
				)
				clients[n] = ps
			}
		}

		ec := ecClient{newPSClientFunc: mockNewPSClient(clients)}
		err := ec.Delete(ctx, tt.nodes, id, storj.NodeID{})

		if tt.errString != "" {
			assert.EqualError(t, err, tt.errString, errTag)
		} else {
			assert.NoError(t, err, errTag)
		}
	}
}

func TestUnique(t *testing.T) {
	limits := make([]*pb.AddressedOrderLimit, 4)
	for i := 0; i < len(limits); i++ {
		limits[i] = &pb.AddressedOrderLimit{
			Limit: &pb.OrderLimit2{
				StorageNodeId: teststorj.NodeIDFromString(fmt.Sprintf("node-%d", i)),
			},
		}
	}

	for i, tt := range []struct {
		nodes  []*pb.Node
		limits []*pb.AddressedOrderLimit
		unique bool
	}{
		{nil, true},
		{[]*pb.Node{}, true},
		{[]*pb.Node{node0}, true},
		{[]*pb.Node{node0, node1}, true},
		{[]*pb.Node{node0, node0}, false},
		{[]*pb.Node{node0, node1, node0}, false},
		{[]*pb.Node{node1, node0, node0}, false},
		{[]*pb.Node{node0, node0, node1}, false},
		{[]*pb.Node{node2, node0, node1}, true},
		{[]*pb.Node{node2, node0, node3, node1}, true},
		{[]*pb.Node{node2, node0, node2, node1}, false},
		{[]*pb.Node{node1, node0, node3, node1}, false},
		{[]*pb.AddressedOrderLimit{}, true},
		{[]*pb.AddressedOrderLimit{limits[0]}, true},
		{[]*pb.AddressedOrderLimit{limits[0], limits[1]}, true},
		{[]*pb.AddressedOrderLimit{limits[0], limits[0]}, false},
		{[]*pb.AddressedOrderLimit{limits[0], limits[1], limits[0]}, false},
		{[]*pb.AddressedOrderLimit{limits[1], limits[0], limits[0]}, false},
		{[]*pb.AddressedOrderLimit{limits[0], limits[0], limits[1]}, false},
		{[]*pb.AddressedOrderLimit{limits[2], limits[0], limits[1]}, true},
		{[]*pb.AddressedOrderLimit{limits[2], limits[0], limits[3], limits[1]}, true},
		{[]*pb.AddressedOrderLimit{limits[2], limits[0], limits[2], limits[1]}, false},
		{[]*pb.AddressedOrderLimit{limits[1], limits[0], limits[3], limits[1]}, false},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)
		assert.Equal(t, tt.unique, unique(tt.nodes), errTag)
		assert.Equal(t, tt.unique, unique(tt.limits), errTag)
	}
}
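
TestUnique above exercises a package-level unique helper that is not shown in this hunk. A minimal sketch of the order-limit variant, assuming distinctness is checked by storage node ID (the node variant would compare pb.Node IDs the same way; the real helper may be implemented differently):

	// uniqueLimits reports whether all storage node IDs in the given
	// order limits are distinct; nil entries are skipped.
	func uniqueLimits(limits []*pb.AddressedOrderLimit) bool {
		seen := make(map[storj.NodeID]struct{}, len(limits))
		for _, limit := range limits {
			if limit == nil {
				continue
			}
			id := limit.GetLimit().StorageNodeId
			if _, ok := seen[id]; ok {
				return false
			}
			seen[id] = struct{}{}
		}
		return true
	}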
@ -1,86 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: storj.io/storj/pkg/storage/ec (interfaces: Client)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	io "io"
	reflect "reflect"
	eestream "storj.io/storj/pkg/eestream"
	pb "storj.io/storj/pkg/pb"
	psclient "storj.io/storj/pkg/piecestore/psclient"
	ranger "storj.io/storj/pkg/ranger"
	storj "storj.io/storj/pkg/storj"
	time "time"
)

// MockClient is a mock of Client interface
type MockClient struct {
	ctrl     *gomock.Controller
	recorder *MockClientMockRecorder
}

// MockClientMockRecorder is the mock recorder for MockClient
type MockClientMockRecorder struct {
	mock *MockClient
}

// NewMockClient creates a new mock instance
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}

// Delete mocks base method
func (m *MockClient) Delete(arg0 context.Context, arg1 []*pb.Node, arg2 psclient.PieceID, arg3 storj.NodeID) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// Delete indicates an expected call of Delete
func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), arg0, arg1, arg2, arg3)
}

// Get mocks base method
func (m *MockClient) Get(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.ErasureScheme, arg3 psclient.PieceID, arg4 int64, arg5 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4, arg5)
	ret0, _ := ret[0].(ranger.Ranger)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get indicates an expected call of Get
func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), arg0, arg1, arg2, arg3, arg4, arg5)
}

// Put mocks base method
func (m *MockClient) Put(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.RedundancyStrategy, arg3 psclient.PieceID, arg4 io.Reader, arg5 time.Time, arg6 *pb.PayerBandwidthAllocation) ([]*pb.Node, []*pb.SignedHash, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
	ret0, _ := ret[0].([]*pb.Node)
	ret1, _ := ret[1].([]*pb.SignedHash)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// Put indicates an expected call of Put
func (mr *MockClientMockRecorder) Put(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockClient)(nil).Put), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
}
@ -1,113 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: storj.io/storj/pkg/piecestore/psclient (interfaces: Client)

// Package ecclient is a generated GoMock package.
package ecclient

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	io "io"
	reflect "reflect"
	pb "storj.io/storj/pkg/pb"
	psclient "storj.io/storj/pkg/piecestore/psclient"
	ranger "storj.io/storj/pkg/ranger"
	storj "storj.io/storj/pkg/storj"
	time "time"
)

// MockPSClient is a mock of Client interface
type MockPSClient struct {
	ctrl     *gomock.Controller
	recorder *MockPSClientMockRecorder
}

// MockPSClientMockRecorder is the mock recorder for MockPSClient
type MockPSClientMockRecorder struct {
	mock *MockPSClient
}

// NewMockPSClient creates a new mock instance
func NewMockPSClient(ctrl *gomock.Controller) *MockPSClient {
	mock := &MockPSClient{ctrl: ctrl}
	mock.recorder = &MockPSClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockPSClient) EXPECT() *MockPSClientMockRecorder {
	return m.recorder
}

// Close mocks base method
func (m *MockPSClient) Close() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Close")
	ret0, _ := ret[0].(error)
	return ret0
}

// Close indicates an expected call of Close
func (mr *MockPSClientMockRecorder) Close() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockPSClient)(nil).Close))
}

// Delete mocks base method
func (m *MockPSClient) Delete(arg0 context.Context, arg1 psclient.PieceID, arg2 storj.NodeID) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// Delete indicates an expected call of Delete
func (mr *MockPSClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockPSClient)(nil).Delete), arg0, arg1, arg2)
}

// Get mocks base method
func (m *MockPSClient) Get(arg0 context.Context, arg1 psclient.PieceID, arg2 int64, arg3 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(ranger.Ranger)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get indicates an expected call of Get
func (mr *MockPSClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPSClient)(nil).Get), arg0, arg1, arg2, arg3)
}

// Meta mocks base method
func (m *MockPSClient) Meta(arg0 context.Context, arg1 psclient.PieceID) (*pb.PieceSummary, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Meta", arg0, arg1)
	ret0, _ := ret[0].(*pb.PieceSummary)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Meta indicates an expected call of Meta
func (mr *MockPSClientMockRecorder) Meta(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Meta", reflect.TypeOf((*MockPSClient)(nil).Meta), arg0, arg1)
}

// Put mocks base method
func (m *MockPSClient) Put(arg0 context.Context, arg1 psclient.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *pb.PayerBandwidthAllocation) (*pb.SignedHash, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(*pb.SignedHash)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Put indicates an expected call of Put
func (mr *MockPSClientMockRecorder) Put(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockPSClient)(nil).Put), arg0, arg1, arg2, arg3, arg4)
}
@ -1,110 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: storj.io/storj/pkg/transport (interfaces: Client)

// Package ecclient is a generated GoMock package.
package ecclient

import (
	context "context"
	gomock "github.com/golang/mock/gomock"
	grpc "google.golang.org/grpc"
	reflect "reflect"
	identity "storj.io/storj/pkg/identity"
	pb "storj.io/storj/pkg/pb"
	transport "storj.io/storj/pkg/transport"
)

// MockClient is a mock of Client interface
type MockClient struct {
	ctrl     *gomock.Controller
	recorder *MockClientMockRecorder
}

// MockClientMockRecorder is the mock recorder for MockClient
type MockClientMockRecorder struct {
	mock *MockClient
}

// NewMockClient creates a new mock instance
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}

// DialAddress mocks base method
func (m *MockClient) DialAddress(arg0 context.Context, arg1 string, arg2 ...grpc.DialOption) (*grpc.ClientConn, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "DialAddress", varargs...)
	ret0, _ := ret[0].(*grpc.ClientConn)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DialAddress indicates an expected call of DialAddress
func (mr *MockClientMockRecorder) DialAddress(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialAddress", reflect.TypeOf((*MockClient)(nil).DialAddress), varargs...)
}

// DialNode mocks base method
func (m *MockClient) DialNode(arg0 context.Context, arg1 *pb.Node, arg2 ...grpc.DialOption) (*grpc.ClientConn, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "DialNode", varargs...)
	ret0, _ := ret[0].(*grpc.ClientConn)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DialNode indicates an expected call of DialNode
func (mr *MockClientMockRecorder) DialNode(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialNode", reflect.TypeOf((*MockClient)(nil).DialNode), varargs...)
}

// Identity mocks base method
func (m *MockClient) Identity() *identity.FullIdentity {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Identity")
	ret0, _ := ret[0].(*identity.FullIdentity)
	return ret0
}

// Identity indicates an expected call of Identity
func (mr *MockClientMockRecorder) Identity() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Identity", reflect.TypeOf((*MockClient)(nil).Identity))
}

// WithObservers mocks base method
func (m *MockClient) WithObservers(arg0 ...transport.Observer) *transport.Transport {
	m.ctrl.T.Helper()
	varargs := []interface{}{}
	for _, a := range arg0 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "WithObservers", varargs...)
	ret0, _ := ret[0].(*transport.Transport)
	return ret0
}

// WithObservers indicates an expected call of WithObservers
func (mr *MockClientMockRecorder) WithObservers(arg0 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithObservers", reflect.TypeOf((*MockClient)(nil).WithObservers), arg0...)
}
@ -6,126 +6,149 @@ package segments
import (
	"context"

	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/zeebo/errs"

	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/pointerdb/pdbclient"
	"storj.io/storj/pkg/pointerdb"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storj"
)

// Repairer for segments
type Repairer struct {
	oc        overlay.Client
	ec        ecclient.Client
	pdb       pdbclient.Client
	nodeStats *pb.NodeStats
	pointerdb            *pointerdb.Service
	allocation           *pointerdb.AllocationSigner
	cache                *overlay.Cache
	ec                   ecclient.Client
	selectionPreferences *overlay.NodeSelectionConfig
	signer               signing.Signer
	identity             *identity.FullIdentity
}

// NewSegmentRepairer creates a new instance of SegmentRepairer
func NewSegmentRepairer(oc overlay.Client, ec ecclient.Client, pdb pdbclient.Client) *Repairer {
	return &Repairer{oc: oc, ec: ec, pdb: pdb}
func NewSegmentRepairer(pointerdb *pointerdb.Service, allocation *pointerdb.AllocationSigner, cache *overlay.Cache, ec ecclient.Client, identity *identity.FullIdentity, selectionPreferences *overlay.NodeSelectionConfig) *Repairer {
	return &Repairer{
		pointerdb:            pointerdb,
		allocation:           allocation,
		cache:                cache,
		ec:                   ec,
		identity:             identity,
		signer:               signing.SignerFromFullIdentity(identity),
		selectionPreferences: selectionPreferences,
	}
}

// Repair retrieves an at-risk segment and repairs and stores lost pieces on new nodes
func (s *Repairer) Repair(ctx context.Context, path storj.Path, lostPieces []int32) (err error) {
func (repairer *Repairer) Repair(ctx context.Context, path storj.Path, lostPieces []int32) (err error) {
	defer mon.Task()(&ctx)(&err)

	// Read the segment's pointer's info from the PointerDB
	pr, originalNodes, _, err := s.pdb.Get(ctx, path)
	// Read the segment pointer from the PointerDB
	pointer, err := repairer.pointerdb.Get(path)
	if err != nil {
		return Error.Wrap(err)
	}

	if pr.GetType() != pb.Pointer_REMOTE {
		return Error.New("cannot repair inline segment %s", psclient.PieceID(pr.GetInlineSegment()))
	if pointer.GetType() != pb.Pointer_REMOTE {
		return Error.New("cannot repair inline segment %s", path)
	}

	seg := pr.GetRemote()
	pid := psclient.PieceID(seg.GetPieceId())

	originalNodes, err = lookupAndAlignNodes(ctx, s.oc, originalNodes, seg)
	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
	if err != nil {
		return Error.Wrap(err)
	}

	// Get the list of nodes that need to be excluded
	pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
	rootPieceID := pointer.GetRemote().RootPieceId
	expiration := pointer.GetExpirationDate()

	var excludeNodeIDs storj.NodeIDList
	var healthyPieces []*pb.RemotePiece
	lostPiecesSet := sliceToSet(lostPieces)

	// Count the number of nil nodes that need to be repaired
	totalNilNodes := 0

	healthyNodes := make([]*pb.Node, len(originalNodes))

	// Populate healthyNodes with all nodes from originalNodes except those correlating to indices in lostPieces
	for i, v := range originalNodes {
		if v == nil {
			totalNilNodes++
			continue
	// Populate healthyPieces with all pieces from the pointer except those correlating to indices in lostPieces
	for _, piece := range pointer.GetRemote().GetRemotePieces() {
		excludeNodeIDs = append(excludeNodeIDs, piece.NodeId)
		if _, ok := lostPiecesSet[piece.GetPieceNum()]; !ok {
			healthyPieces = append(healthyPieces, piece)
		}
		v.Type.DPanicOnInvalid("repair")
		excludeNodeIDs = append(excludeNodeIDs, v.Id)
	}

	// If node index exists in lostPieces, skip adding it to healthyNodes
	if contains(lostPieces, i) {
		totalNilNodes++
	} else {
		healthyNodes[i] = v
	// Create the order limits for the GET_REPAIR action
	getLimits := make([]*pb.AddressedOrderLimit, redundancy.TotalCount())
	for _, piece := range healthyPieces {
		derivedPieceID := rootPieceID.Derive(piece.NodeId)
		orderLimit, err := repairer.createOrderLimit(ctx, piece.NodeId, derivedPieceID, expiration, pieceSize, pb.PieceAction_GET_REPAIR)
		if err != nil {
			return err
		}

		node, err := repairer.cache.Get(ctx, piece.NodeId)
		if err != nil {
			return Error.Wrap(err)
		}

		if node != nil {
			node.Type.DPanicOnInvalid("repair")
		}

		getLimits[piece.GetPieceNum()] = &pb.AddressedOrderLimit{
			Limit:              orderLimit,
			StorageNodeAddress: node.Address,
		}
	}

	// Request Overlay for n-h new storage nodes
	op := overlay.Options{Amount: totalNilNodes, Space: 0, Excluded: excludeNodeIDs}
	newNodes, err := s.oc.Choose(ctx, op)
	if err != nil {
		return err
	request := &pb.FindStorageNodesRequest{
		Opts: &pb.OverlayOptions{
			Amount: int64(redundancy.TotalCount()) - int64(len(healthyPieces)),
			Restrictions: &pb.NodeRestrictions{
				FreeBandwidth: pieceSize,
				FreeDisk:      pieceSize,
			},
			ExcludedNodes: excludeNodeIDs,
		},
	}

	if totalNilNodes != len(newNodes) {
		return Error.New("Number of new nodes from overlay (%d) does not equal total nil nodes (%d)", len(newNodes), totalNilNodes)
	}

	totalRepairCount := len(newNodes)

	// Make a repair nodes list just with new unique ids
	repairNodes := make([]*pb.Node, len(healthyNodes))
	for i, vr := range healthyNodes {
		// Check that totalRepairCount is non-negative
		if totalRepairCount < 0 {
			return Error.New("Total repair count (%d) less than zero", totalRepairCount)
		}

		// Find the nil nodes in the healthyNodes list
		if vr == nil {
			// Assign the item in repairNodes list with an item from the newNode list
			totalRepairCount--
			repairNodes[i] = newNodes[totalRepairCount]
		}
	}
	for _, v := range repairNodes {
		if v != nil {
			v.Type.DPanicOnInvalid("repair 2")
		}
	}

	// Check that all nil nodes have a replacement prepared
	if totalRepairCount != 0 {
		return Error.New("Failed to replace all nil nodes (%d). (%d) new nodes not inserted", len(newNodes), totalRepairCount)
	}

	rs, err := makeRedundancyStrategy(pr.GetRemote().GetRedundancy())
	newNodes, err := repairer.cache.FindStorageNodes(ctx, request, repairer.selectionPreferences)
	if err != nil {
		return Error.Wrap(err)
	}

	pbaGet, err := s.pdb.PayerBandwidthAllocation(ctx, pb.BandwidthAction_GET_REPAIR)
	if err != nil {
		return Error.Wrap(err)
	// Create the order limits for the PUT_REPAIR action
	putLimits := make([]*pb.AddressedOrderLimit, redundancy.TotalCount())
	pieceNum := 0
	for _, node := range newNodes {
		if node != nil {
			node.Type.DPanicOnInvalid("repair 2")
		}

		for pieceNum < redundancy.TotalCount() && getLimits[pieceNum] != nil {
			pieceNum++
		}

		if pieceNum >= redundancy.TotalCount() {
			break // should not happen
		}

		derivedPieceID := rootPieceID.Derive(node.Id)
		orderLimit, err := repairer.createOrderLimit(ctx, node.Id, derivedPieceID, expiration, pieceSize, pb.PieceAction_PUT_REPAIR)
		if err != nil {
			return err
		}

		putLimits[pieceNum] = &pb.AddressedOrderLimit{
			Limit:              orderLimit,
			StorageNodeAddress: node.Address,
		}
		pieceNum++
	}
	// Download the segment using just the healthyNodes
	rr, err := s.ec.Get(ctx, healthyNodes, rs, pid, pr.GetSegmentSize(), pbaGet)

	// Download the segment using just the healthy pieces
	rr, err := repairer.ec.Get(ctx, getLimits, redundancy, pointer.GetSegmentSize())
	if err != nil {
		return Error.Wrap(err)
	}
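
The PUT_REPAIR loop above advances pieceNum past every slot that already has a GET_REPAIR limit, so each replacement node lands on a piece number that is actually missing. The slot-filling logic in isolation (a hypothetical standalone helper for illustration, not part of this commit):

	// assignSlots pairs each of n replacement nodes with the next free
	// piece number, mirroring the gap-filling loop in Repair.
	func assignSlots(total int, occupied []bool, n int) []int {
		slots := make([]int, 0, n)
		pieceNum := 0
		for i := 0; i < n; i++ {
			for pieceNum < total && occupied[pieceNum] {
				pieceNum++
			}
			if pieceNum >= total {
				break // more new nodes than free slots; should not happen
			}
			slots = append(slots, pieceNum)
			pieceNum++
		}
		return slots
	}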
@ -136,30 +159,55 @@ func (s *Repairer) Repair(ctx context.Context, path storj.Path, lostPieces []int
	}
	defer func() { err = errs.Combine(err, r.Close()) }()

	pbaPut, err := s.pdb.PayerBandwidthAllocation(ctx, pb.BandwidthAction_PUT_REPAIR)
	if err != nil {
		return Error.Wrap(err)
	}
	// Upload the repaired pieces to the repairNodes
	successfulNodes, hashes, err := s.ec.Put(ctx, repairNodes, rs, pid, r, convertTime(pr.GetExpirationDate()), pbaPut)
	// Upload the repaired pieces
	successfulNodes, hashes, err := repairer.ec.Put(ctx, putLimits, redundancy, r, convertTime(expiration))
	if err != nil {
		return Error.Wrap(err)
	}

	// Merge the successful nodes list into the healthy nodes list
	for i, v := range healthyNodes {
		if v == nil {
			// copy the successfulNode info
			healthyNodes[i] = successfulNodes[i]
	// Add the successfully uploaded pieces to the healthyPieces
	for i, node := range successfulNodes {
		if node == nil {
			continue
		}
		healthyPieces = append(healthyPieces, &pb.RemotePiece{
			PieceNum: int32(i),
			NodeId:   node.Id,
			Hash:     hashes[i],
		})
	}

	metadata := pr.GetMetadata()
	pointer, err := makeRemotePointer(healthyNodes, hashes, rs, pid, rr.Size(), pr.GetExpirationDate(), metadata)
	if err != nil {
		return Error.Wrap(err)
	}
	// Update the remote pieces in the pointer
	pointer.GetRemote().RemotePieces = healthyPieces

	// update the segment info in the pointerDB
	return s.pdb.Put(ctx, path, pointer)
	// Update the segment pointer in the PointerDB
	return repairer.pointerdb.Put(path, pointer)
}

func (repairer *Repairer) createOrderLimit(ctx context.Context, nodeID storj.NodeID, pieceID storj.PieceID, expiration *timestamp.Timestamp, limit int64, action pb.PieceAction) (*pb.OrderLimit2, error) {
	parameters := pointerdb.OrderLimitParameters{
		UplinkIdentity:  repairer.identity.PeerIdentity(),
		StorageNodeID:   nodeID,
		PieceID:         pieceID,
		Action:          action,
		PieceExpiration: expiration,
		Limit:           limit,
	}

	orderLimit, err := repairer.allocation.OrderLimit(ctx, parameters)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	orderLimit, err = signing.SignOrderLimit(repairer.signer, orderLimit)
	return orderLimit, Error.Wrap(err)
}

// sliceToSet converts the given slice to a set
func sliceToSet(slice []int32) map[int32]struct{} {
	set := make(map[int32]struct{}, len(slice))
	for _, value := range slice {
		set[value] = struct{}{}
	}
	return set
}
@ -1,121 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package segments

import (
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/golang/protobuf/ptypes"
	"github.com/stretchr/testify/assert"

	"storj.io/storj/internal/teststorj"
	mock_overlay "storj.io/storj/pkg/overlay/mocks"
	"storj.io/storj/pkg/pb"
	mock_pointerdb "storj.io/storj/pkg/pointerdb/pdbclient/mocks"
	"storj.io/storj/pkg/ranger"
	mock_ecclient "storj.io/storj/pkg/storage/ec/mocks"
)

func TestNewSegmentRepairer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockOC := mock_overlay.NewMockClient(ctrl)
	mockEC := mock_ecclient.NewMockClient(ctrl)
	mockPDB := mock_pointerdb.NewMockClient(ctrl)

	ss := NewSegmentRepairer(mockOC, mockEC, mockPDB)
	assert.NotNil(t, ss)
}

func TestSegmentStoreRepairRemote(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ti := time.Unix(0, 0).UTC()
	someTime, err := ptypes.TimestampProto(ti)
	assert.NoError(t, err)

	for _, tt := range []struct {
		pathInput               string
		thresholdSize           int
		pointerType             pb.Pointer_DataType
		size                    int64
		metadata                []byte
		lostPieces              []int32
		newNodes                []*pb.Node
		data                    string
		strsize, offset, length int64
		substr                  string
		meta                    Meta
	}{
		{
			"path/1/2/3",
			10,
			pb.Pointer_REMOTE,
			int64(3),
			[]byte("metadata"),
			[]int32{},
			[]*pb.Node{
				teststorj.MockNode("1"),
				teststorj.MockNode("2"),
			},
			"abcdefghijkl",
			12,
			1,
			4,
			"bcde",
			Meta{},
		},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)

		sr := Repairer{mockOC, mockEC, mockPDB, &pb.NodeStats{}}
		assert.NotNil(t, sr)

		calls := []*gomock.Call{
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			).Return(&pb.Pointer{
				Type: tt.pointerType,
				Remote: &pb.RemoteSegment{
					Redundancy: &pb.RedundancyScheme{
						Type:             pb.RedundancyScheme_RS,
						MinReq:           1,
						Total:            2,
						RepairThreshold:  1,
						SuccessThreshold: 2,
					},
					PieceId:      "here's my piece id",
					RemotePieces: []*pb.RemotePiece{},
				},
				CreationDate:   someTime,
				ExpirationDate: someTime,
				SegmentSize:    tt.size,
				Metadata:       tt.metadata,
			}, nil, nil, nil),
			mockOC.EXPECT().BulkLookup(gomock.Any(), gomock.Any()),
			mockOC.EXPECT().Choose(gomock.Any(), gomock.Any()).Return(tt.newNodes, nil),
			mockPDB.EXPECT().PayerBandwidthAllocation(gomock.Any(), gomock.Any()),
			mockEC.EXPECT().Get(
				gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
			).Return(ranger.ByteRanger([]byte(tt.data)), nil),
			mockPDB.EXPECT().PayerBandwidthAllocation(gomock.Any(), gomock.Any()),
			mockEC.EXPECT().Put(
				gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
			).Return(tt.newNodes, make([]*pb.SignedHash, len(tt.newNodes)), nil),
			mockPDB.EXPECT().Put(
				gomock.Any(), gomock.Any(), gomock.Any(),
			).Return(nil),
		}
		gomock.InOrder(calls...)

		err := sr.Repair(ctx, tt.pathInput, tt.lostPieces)
		assert.NoError(t, err)
	}
}
@ -7,22 +7,21 @@ import (
	"context"
	"io"
	"math/rand"
	"strconv"
	"strings"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/vivint/infectious"
	"go.uber.org/zap"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/pointerdb/pdbclient"
	"storj.io/storj/pkg/ranger"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/uplink/metainfo"
)

var (
@ -54,21 +53,21 @@ type Store interface {
}

type segmentStore struct {
	oc            overlay.Client
	ec            ecclient.Client
	pdb           pdbclient.Client
	rs            eestream.RedundancyStrategy
	thresholdSize int
	metainfo                metainfo.Client
	ec                      ecclient.Client
	rs                      eestream.RedundancyStrategy
	thresholdSize           int
	maxEncryptedSegmentSize int64
}

// NewSegmentStore creates a new instance of segmentStore
func NewSegmentStore(oc overlay.Client, ec ecclient.Client, pdb pdbclient.Client, rs eestream.RedundancyStrategy, threshold int) Store {
func NewSegmentStore(metainfo metainfo.Client, ec ecclient.Client, rs eestream.RedundancyStrategy, threshold int, maxEncryptedSegmentSize int64) Store {
	return &segmentStore{
		oc:            oc,
		ec:            ec,
		pdb:           pdb,
		rs:            rs,
		thresholdSize: threshold,
		metainfo:                metainfo,
		ec:                      ec,
		rs:                      rs,
		thresholdSize:           threshold,
		maxEncryptedSegmentSize: maxEncryptedSegmentSize,
	}
}

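NewSegmentStore now takes the metainfo client and a maximum encrypted segment size in place of the overlay and pointerdb clients. A hedged construction sketch (metainfoClient, ecClient, and rs stand for values obtained elsewhere; the sizes are illustrative and assume the internal/memory package is imported):

	store := NewSegmentStore(metainfoClient, ecClient, rs, 4*memory.KiB.Int(), 64*memory.MiB.Int64())
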
@ -76,18 +75,32 @@ func NewSegmentStore(oc overlay.Client, ec ecclient.Client, pdb pdbclient.Client
func (s *segmentStore) Meta(ctx context.Context, path storj.Path) (meta Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	pr, _, _, err := s.pdb.Get(ctx, path)
	bucket, objectPath, segmentIndex, err := split(path)
	if err != nil {
		return Meta{}, err
	}

	pointer, err := s.metainfo.SegmentInfo(ctx, bucket, objectPath, segmentIndex)
	if err != nil {
		return Meta{}, Error.Wrap(err)
	}

	return convertMeta(pr), nil
	return convertMeta(pointer), nil
}

// Put uploads a segment to an erasure code client
func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.Time, segmentInfo func() (storj.Path, []byte, error)) (meta Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	redundancy := &pb.RedundancyScheme{
		Type:             pb.RedundancyScheme_RS,
		MinReq:           int32(s.rs.RequiredCount()),
		Total:            int32(s.rs.TotalCount()),
		RepairThreshold:  int32(s.rs.RepairThreshold()),
		SuccessThreshold: int32(s.rs.OptimalThreshold()),
		ErasureShareSize: int32(s.rs.ErasureShareSize()),
	}

	exp, err := ptypes.TimestampProto(expiration)
	if err != nil {
		return Meta{}, Error.Wrap(err)
@ -101,6 +114,7 @@ func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.

	var path storj.Path
	var pointer *pb.Pointer
	var originalLimits []*pb.OrderLimit2
	if !remoteSized {
		p, metadata, err := segmentInfo()
		if err != nil {
@ -116,33 +130,14 @@ func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.
			Metadata: metadata,
		}
	} else {
		limits, rootPieceID, err := s.metainfo.CreateSegment(ctx, "", "", -1, redundancy, s.maxEncryptedSegmentSize, expiration) // bucket, path and segment index are not known at this point
		if err != nil {
			return Meta{}, Error.Wrap(err)
		}

		sizedReader := SizeReader(peekReader)

		// uses overlay client to request a list of nodes according to configured standards
		nodes, err := s.oc.Choose(ctx,
			overlay.Options{
				Amount:    s.rs.TotalCount(),
				Bandwidth: sizedReader.Size() / int64(s.rs.TotalCount()),
				Space:     sizedReader.Size() / int64(s.rs.TotalCount()),
				Excluded:  nil,
			})
		if err != nil {
			return Meta{}, Error.Wrap(err)
		}
		for _, v := range nodes {
			if v != nil {
				v.Type.DPanicOnInvalid("ss put")
			}
		}

		pieceID := psclient.NewPieceID()

		pba, err := s.pdb.PayerBandwidthAllocation(ctx, pb.BandwidthAction_PUT)
		if err != nil {
			return Meta{}, Error.Wrap(err)
		}

		successfulNodes, successfulHashes, err := s.ec.Put(ctx, nodes, s.rs, pieceID, sizedReader, expiration, pba)
		successfulNodes, successfulHashes, err := s.ec.Put(ctx, limits, s.rs, sizedReader, expiration)
		if err != nil {
			return Meta{}, Error.Wrap(err)
		}
@ -153,83 +148,83 @@ func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.
		}
		path = p

		pointer, err = makeRemotePointer(successfulNodes, successfulHashes, s.rs, pieceID, sizedReader.Size(), exp, metadata)
		pointer, err = makeRemotePointer(successfulNodes, successfulHashes, s.rs, rootPieceID, sizedReader.Size(), exp, metadata)
		if err != nil {
			return Meta{}, Error.Wrap(err)
		}

		originalLimits = make([]*pb.OrderLimit2, len(limits))
		for i, addressedLimit := range limits {
			originalLimits[i] = addressedLimit.GetLimit()
		}
	}

	// puts pointer to pointerDB
	err = s.pdb.Put(ctx, path, pointer)
	bucket, objectPath, segmentIndex, err := split(path)
	if err != nil {
		return Meta{}, err
	}

	savedPointer, err := s.metainfo.CommitSegment(ctx, bucket, objectPath, segmentIndex, pointer, originalLimits)
	if err != nil {
		return Meta{}, Error.Wrap(err)
	}

	// get the metadata for the newly uploaded segment
	m, err := s.Meta(ctx, path)
	if err != nil {
		return Meta{}, Error.Wrap(err)
	}
	return m, nil
	return convertMeta(savedPointer), nil
}

// Get retrieves a segment using erasure code, overlay, and pointerdb clients
// Get requests the satellite to read a segment and downloads the pieces from the storage nodes
func (s *segmentStore) Get(ctx context.Context, path storj.Path) (rr ranger.Ranger, meta Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	pr, nodes, pba, err := s.pdb.Get(ctx, path)
	bucket, objectPath, segmentIndex, err := split(path)
	if err != nil {
		return nil, Meta{}, err
	}

	pointer, limits, err := s.metainfo.ReadSegment(ctx, bucket, objectPath, segmentIndex)
	if err != nil {
		return nil, Meta{}, Error.Wrap(err)
	}

	switch pr.GetType() {
	switch pointer.GetType() {
	case pb.Pointer_INLINE:
		rr = ranger.ByteRanger(pr.InlineSegment)
		return ranger.ByteRanger(pointer.InlineSegment), convertMeta(pointer), nil
	case pb.Pointer_REMOTE:
		seg := pr.GetRemote()
		pid := psclient.PieceID(seg.GetPieceId())
		needed := CalcNeededNodes(pointer.GetRemote().GetRedundancy())
		selected := make([]*pb.AddressedOrderLimit, len(limits))

		nodes, err = lookupAndAlignNodes(ctx, s.oc, nodes, seg)
		if err != nil {
			return nil, Meta{}, Error.Wrap(err)
		}

		rs, err := makeRedundancyStrategy(pr.GetRemote().GetRedundancy())
		if err != nil {
			return nil, Meta{}, err
		}

		needed := calcNeededNodes(pr.GetRemote().GetRedundancy())
		selected := make([]*pb.Node, rs.TotalCount())

		for _, i := range rand.Perm(len(nodes)) {
			node := nodes[i]
			if node == nil {
		for _, i := range rand.Perm(len(limits)) {
			limit := limits[i]
			if limit == nil {
				continue
			}

			selected[i] = node
			selected[i] = limit

			needed--
			if needed <= 0 {
				break
			}
			node.Type.DPanicOnInvalid("ss get")
		}

		rr, err = s.ec.Get(ctx, selected, rs, pid, pr.GetSegmentSize(), pba)
		redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
		if err != nil {
			return nil, Meta{}, err
		}

		rr, err = s.ec.Get(ctx, selected, redundancy, pointer.GetSegmentSize())
		if err != nil {
			return nil, Meta{}, Error.Wrap(err)
		}
	default:
		return nil, Meta{}, Error.New("unsupported pointer type: %d", pr.GetType())
	}

	return rr, convertMeta(pr), nil
		return rr, convertMeta(pointer), nil
	default:
		return nil, Meta{}, Error.New("unsupported pointer type: %d", pointer.GetType())
	}
}

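Note that the selection loop above keeps each chosen limit at its original index: in the new protocol the slice position is the piece number, which the erasure decoder needs to reassemble the stripe, so the slice must not be compacted. The same logic as a standalone helper (hypothetical, for illustration only):

	// pickAligned randomly chooses up to needed non-nil limits while
	// preserving their piece-number positions.
	func pickAligned(limits []*pb.AddressedOrderLimit, needed int32) []*pb.AddressedOrderLimit {
		selected := make([]*pb.AddressedOrderLimit, len(limits))
		for _, i := range rand.Perm(len(limits)) {
			if limits[i] == nil {
				continue
			}
			selected[i] = limits[i]
			needed--
			if needed <= 0 {
				break
			}
		}
		return selected
	}
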
// makeRemotePointer creates a pointer of type remote
func makeRemotePointer(nodes []*pb.Node, hashes []*pb.SignedHash, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, readerSize int64, exp *timestamp.Timestamp, metadata []byte) (pointer *pb.Pointer, err error) {
func makeRemotePointer(nodes []*pb.Node, hashes []*pb.PieceHash, rs eestream.RedundancyStrategy, pieceID storj.PieceID, readerSize int64, exp *timestamp.Timestamp, metadata []byte) (pointer *pb.Pointer, err error) {
	if len(nodes) != len(hashes) {
		return nil, Error.New("unable to make pointer: size of nodes != size of hashes")
	}
@ -258,7 +253,7 @@ func makeRemotePointer(nodes []*pb.Node, hashes []*pb.SignedHash, rs eestream.Re
				SuccessThreshold: int32(rs.OptimalThreshold()),
				ErasureShareSize: int32(rs.ErasureShareSize()),
			},
			PieceId:      string(pieceID),
			RootPieceId:  pieceID,
			RemotePieces: remotePieces,
		},
		SegmentSize: readerSize,
@ -268,51 +263,51 @@ func makeRemotePointer(nodes []*pb.Node, hashes []*pb.SignedHash, rs eestream.Re
	return pointer, nil
}

// Delete tells piece stores to delete a segment and deletes pointer from pointerdb
// Delete requests the satellite to delete a segment and tells storage nodes
// to delete the segment's pieces.
func (s *segmentStore) Delete(ctx context.Context, path storj.Path) (err error) {
	defer mon.Task()(&ctx)(&err)

	pr, nodes, pba, err := s.pdb.Get(ctx, path)
	bucket, objectPath, segmentIndex, err := split(path)
	if err != nil {
		return err
	}

	limits, err := s.metainfo.DeleteSegment(ctx, bucket, objectPath, segmentIndex)
	if err != nil {
		return Error.Wrap(err)
	}

	if pr.GetType() == pb.Pointer_REMOTE {
		seg := pr.GetRemote()
		pid := psclient.PieceID(seg.PieceId)

		nodes, err = lookupAndAlignNodes(ctx, s.oc, nodes, seg)
		if err != nil {
			return Error.Wrap(err)
		}
		for _, v := range nodes {
			if v != nil {
				v.Type.DPanicOnInvalid("ss delete")
			}
		}

		// ecclient sends delete request
		err = s.ec.Delete(ctx, nodes, pid, pba.SatelliteId)
		if err != nil {
			return Error.Wrap(err)
		}
	if len(limits) == 0 {
		// inline segment - nothing else to do
		return
	}

	// deletes pointer from pointerdb
	return s.pdb.Delete(ctx, path)
	// remote segment - delete the pieces from storage nodes
	err = s.ec.Delete(ctx, limits)
	if err != nil {
		return Error.Wrap(err)
	}

	return nil
}

// List retrieves paths to segments and their metadata stored in the pointerdb
func (s *segmentStore) List(ctx context.Context, prefix, startAfter, endBefore storj.Path, recursive bool, limit int, metaFlags uint32) (items []ListItem, more bool, err error) {
	defer mon.Task()(&ctx)(&err)

	pdbItems, more, err := s.pdb.List(ctx, prefix, startAfter, endBefore, recursive, limit, metaFlags)
	bucket, strippedPrefix, _, err := split(prefix)
	if err != nil {
		return nil, false, err
		return nil, false, Error.Wrap(err)
	}

	items = make([]ListItem, len(pdbItems))
	for i, itm := range pdbItems {
	list, more, err := s.metainfo.ListSegments(ctx, bucket, strippedPrefix, startAfter, endBefore, recursive, int32(limit), metaFlags)
	if err != nil {
		return nil, false, Error.Wrap(err)
	}

	items = make([]ListItem, len(list))
	for i, itm := range list {
		items[i] = ListItem{
			Path: itm.Path,
			Meta: convertMeta(itm.Pointer),
@ -323,18 +318,9 @@ func (s *segmentStore) List(ctx context.Context, prefix, startAfter, endBefore s
	return items, more, nil
}

func makeRedundancyStrategy(scheme *pb.RedundancyScheme) (eestream.RedundancyStrategy, error) {
	fc, err := infectious.NewFEC(int(scheme.GetMinReq()), int(scheme.GetTotal()))
	if err != nil {
		return eestream.RedundancyStrategy{}, Error.Wrap(err)
	}
	es := eestream.NewRSScheme(fc, int(scheme.GetErasureShareSize()))
	return eestream.NewRedundancyStrategy(es, int(scheme.GetRepairThreshold()), int(scheme.GetSuccessThreshold()))
}

// calcNeededNodes calculates how many minimum nodes are needed for download,
// CalcNeededNodes calculates how many minimum nodes are needed for download,
// based on t = k + (n-o)k/o
func calcNeededNodes(rs *pb.RedundancyScheme) int32 {
func CalcNeededNodes(rs *pb.RedundancyScheme) int32 {
	extra := int32(1)

	if rs.GetSuccessThreshold() > 0 {
@ -354,49 +340,6 @@ func calcNeededNodes(rs *pb.RedundancyScheme) int32 {
	return needed
}
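
As a quick numeric check of the t = k + (n-o)k/o formula (illustrative redundancy numbers, not taken from this commit):

	k, o, n := int32(29), int32(80), int32(95) // MinReq, SuccessThreshold, Total
	extra := (n - o) * k / o                   // integer division: 15*29/80 = 5
	needed := k + extra                        // request 34 pieces instead of the bare minimum 29

Requesting a few more pieces than the bare minimum means a handful of slow or failed nodes cannot stall the download.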

// lookupNodes, if necessary, calls Lookup to get node addresses from the overlay.
// It also realigns the nodes to an indexed list of nodes based on the piece number.
// Missing pieces are represented by a nil node.
func lookupAndAlignNodes(ctx context.Context, oc overlay.Client, nodes []*pb.Node, seg *pb.RemoteSegment) (result []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	if nodes == nil {
		// Get list of all nodes IDs storing a piece from the segment
		var nodeIds storj.NodeIDList
		for _, p := range seg.RemotePieces {
			nodeIds = append(nodeIds, p.NodeId)
		}
		// Lookup the node info from node IDs
		nodes, err = oc.BulkLookup(ctx, nodeIds)
		if err != nil {
			return nil, Error.Wrap(err)
		}
	}
	for _, v := range nodes {
		if v != nil {
			v.Type.DPanicOnInvalid("lookup and align nodes")
		}
	}

	// Realign the nodes
	result = make([]*pb.Node, seg.GetRedundancy().GetTotal())
	for i, p := range seg.GetRemotePieces() {
		result[p.PieceNum] = nodes[i]
	}

	return result, nil
}

// contains checks if n exists in list
func contains(list []int32, n int) bool {
	for i := range list {
		if n == int(list[i]) {
			return true
		}
	}
	return false
}

// convertMeta converts pointer to segment metadata
func convertMeta(pr *pb.Pointer) Meta {
	return Meta{
@ -418,3 +361,37 @@ func convertTime(ts *timestamp.Timestamp) time.Time {
	}
	return t
}

func split(path storj.Path) (bucket string, objectPath storj.Path, segmentIndex int64, err error) {
	components := storj.SplitPath(path)
	if len(components) < 1 {
		return "", "", -2, Error.New("empty path")
	}

	segmentIndex, err = convertSegmentIndex(components[0])
	if err != nil {
		return "", "", -2, err
	}

	if len(components) > 1 {
		bucket = components[1]
		objectPath = storj.JoinPaths(components[2:]...)
	}

	return bucket, objectPath, segmentIndex, nil
}

func convertSegmentIndex(segmentComp string) (segmentIndex int64, err error) {
	switch {
	case segmentComp == "l":
		return -1, nil
	case strings.HasPrefix(segmentComp, "s"):
		num, err := strconv.Atoi(segmentComp[1:])
		if err != nil {
			return -2, Error.Wrap(err)
		}
		return int64(num), nil
	default:
		return -2, Error.New("invalid segment component: %s", segmentComp)
	}
}
@ -1,464 +1,206 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package segments
package segments_test

import (
	"bytes"
	"context"
	"crypto/rand"
	"fmt"
	"strings"
	io "io"
	"io/ioutil"
	"strconv"
	"testing"
	"time"
	time "time"

	"github.com/golang/mock/gomock"
	"github.com/golang/protobuf/ptypes"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/vivint/infectious"

	"storj.io/storj/internal/teststorj"
	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/eestream"
	mock_eestream "storj.io/storj/pkg/eestream/mocks"
	mock_overlay "storj.io/storj/pkg/overlay/mocks"
	mock_pointerdb "storj.io/storj/pkg/pointerdb/pdbclient/mocks"
	mock_ecclient "storj.io/storj/pkg/storage/ec/mocks"

	"storj.io/storj/pkg/pb"
	pdb "storj.io/storj/pkg/pointerdb/pdbclient"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storage/meta"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/storage/segments"
	storj "storj.io/storj/pkg/storj"
	"storj.io/storj/satellite/console"
	"storj.io/storj/storage"
)

var (
	ctx = context.Background()
)

func TestNewSegmentStore(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockOC := mock_overlay.NewMockClient(ctrl)
	mockEC := mock_ecclient.NewMockClient(ctrl)
	mockPDB := mock_pointerdb.NewMockClient(ctrl)
	rs := eestream.RedundancyStrategy{
		ErasureScheme: mock_eestream.NewMockErasureScheme(ctrl),
	}

	ss := NewSegmentStore(mockOC, mockEC, mockPDB, rs, 10)
	assert.NotNil(t, ss)
}

func TestSegmentStoreMeta(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockOC := mock_overlay.NewMockClient(ctrl)
	mockEC := mock_ecclient.NewMockClient(ctrl)
	mockPDB := mock_pointerdb.NewMockClient(ctrl)
	rs := eestream.RedundancyStrategy{
		ErasureScheme: mock_eestream.NewMockErasureScheme(ctrl),
	}

	ss := segmentStore{mockOC, mockEC, mockPDB, rs, 10}
	assert.NotNil(t, ss)

	var mExp time.Time
	pExp, err := ptypes.TimestampProto(mExp)
	assert.NoError(t, err)

	for _, tt := range []struct {
		pathInput     string
		returnPointer *pb.Pointer
		returnMeta    Meta
	for i, tt := range []struct {
		path       string
		data       []byte
		metadata   []byte
		expiration time.Time
		err        string
	}{
		{"path/1/2/3", &pb.Pointer{CreationDate: pExp, ExpirationDate: pExp}, Meta{Modified: mExp, Expiration: mExp}},
		{"l/path/1/2/3", []byte("content"), []byte("metadata"), time.Now().UTC().Add(time.Hour * 12), ""},
		{"l/not_exists_path/1/2/3", []byte{}, []byte{}, time.Now(), "key not found"},
		{"", []byte{}, []byte{}, time.Now(), "invalid segment component"},
	} {
		calls := []*gomock.Call{
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			).Return(tt.returnPointer, nil, nil, nil),
		}
		gomock.InOrder(calls...)
		t.Run("#"+strconv.Itoa(i), func(t *testing.T) {
			runTest(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, segmentStore segments.Store) {
				expectedSize := int64(len(tt.data))
				reader := bytes.NewReader(tt.data)

		m, err := ss.Meta(ctx, tt.pathInput)
		assert.NoError(t, err)
		assert.Equal(t, m, tt.returnMeta)
	}
}
				beforeModified := time.Now()
				if tt.err == "" {
					meta, err := segmentStore.Put(ctx, reader, tt.expiration, func() (storj.Path, []byte, error) {
						return tt.path, tt.metadata, nil
					})
					require.NoError(t, err)
					assert.Equal(t, expectedSize, meta.Size)
					assert.Equal(t, tt.metadata, meta.Data)
					assert.Equal(t, tt.expiration, meta.Expiration)
					assert.True(t, meta.Modified.After(beforeModified))
				}

func TestSegmentStorePutRemote(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	for _, tt := range []struct {
		name          string
		pathInput     string
		mdInput       []byte
		thresholdSize int
		expiration    time.Time
		readerContent string
	}{
		{"test remote put", "path/1", []byte("abcdefghijklmnopqrstuvwxyz"), 2, time.Unix(0, 0).UTC(), "readerreaderreader"},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		}

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)

		calls := []*gomock.Call{
			mockES.EXPECT().TotalCount().Return(1).AnyTimes(),
			mockOC.EXPECT().Choose(
				gomock.Any(), gomock.Any(),
			).Return([]*pb.Node{
				{Id: teststorj.NodeIDFromString("im-a-node"),
					Type: pb.NodeType_STORAGE,
				},
			}, nil),
			mockPDB.EXPECT().PayerBandwidthAllocation(gomock.Any(), gomock.Any()),
			mockEC.EXPECT().Put(
				gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
			),
			mockES.EXPECT().RequiredCount().Return(1),
			mockES.EXPECT().TotalCount().Return(1),
			mockES.EXPECT().ErasureShareSize().Return(1),
			mockPDB.EXPECT().Put(
				gomock.Any(), gomock.Any(), gomock.Any(),
			).Return(nil),
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			),
		}
		gomock.InOrder(calls...)

		_, err := ss.Put(ctx, strings.NewReader(tt.readerContent), tt.expiration, func() (storj.Path, []byte, error) {
			return tt.pathInput, tt.mdInput, nil
				meta, err := segmentStore.Meta(ctx, tt.path)
				if tt.err == "" {
					require.NoError(t, err)
					assert.Equal(t, expectedSize, meta.Size)
					assert.Equal(t, tt.metadata, meta.Data)
					assert.Equal(t, tt.expiration, meta.Expiration)
					assert.True(t, meta.Modified.After(beforeModified))
				} else {
					require.Contains(t, err.Error(), tt.err)
				}
			})
		})
		assert.NoError(t, err, tt.name)
	}
}

func TestSegmentStorePutInline(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

func TestSegmentStorePutGet(t *testing.T) {
	for _, tt := range []struct {
		name          string
		pathInput     string
		mdInput       []byte
		thresholdSize int
		expiration    time.Time
		readerContent string
		name       string
		path       string
		metadata   []byte
		expiration time.Time
		content    []byte
	}{
		{"test inline put", "path/1", []byte("111"), 1000, time.Unix(0, 0).UTC(), "readerreaderreader"},
		{"test inline put/get", "l/path/1", []byte("metadata-intline"), time.Now().UTC(), createTestData(t, 2*memory.KiB.Int64())},
		{"test remote put/get", "s0/test_bucket/mypath/1", []byte("metadata-remote"), time.Now().UTC(), createTestData(t, 100*memory.KiB.Int64())},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		}
		runTest(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, segmentStore segments.Store) {
			metadata, err := segmentStore.Put(ctx, bytes.NewReader(tt.content), tt.expiration, func() (storj.Path, []byte, error) {
				return tt.path, tt.metadata, nil
			})
			require.NoError(t, err, tt.name)
			require.Equal(t, tt.metadata, metadata.Data)

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)
			rr, metadata, err := segmentStore.Get(ctx, tt.path)
			require.NoError(t, err, tt.name)
			require.Equal(t, tt.metadata, metadata.Data)

		calls := []*gomock.Call{
			mockPDB.EXPECT().Put(
				gomock.Any(), gomock.Any(), gomock.Any(),
			).Return(nil),
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			),
		}
		gomock.InOrder(calls...)
			reader, err := rr.Range(ctx, 0, rr.Size())
			require.NoError(t, err, tt.name)
			content, err := ioutil.ReadAll(reader)
			require.NoError(t, err, tt.name)
			require.Equal(t, tt.content, content)

		_, err := ss.Put(ctx, strings.NewReader(tt.readerContent), tt.expiration, func() (storj.Path, []byte, error) {
			return tt.pathInput, tt.mdInput, nil
			require.NoError(t, reader.Close(), tt.name)
		})
		assert.NoError(t, err, tt.name)
	}
}

func TestSegmentStoreGetInline(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ti := time.Unix(0, 0).UTC()
	someTime, err := ptypes.TimestampProto(ti)
	assert.NoError(t, err)

func TestSegmentStoreDelete(t *testing.T) {
	for _, tt := range []struct {
		pathInput     string
		thresholdSize int
		pointerType   pb.Pointer_DataType
		inlineContent []byte
		size          int64
		metadata      []byte
		name       string
		path       string
		metadata   []byte
		expiration time.Time
		content    []byte
	}{
		{"path/1/2/3", 10, pb.Pointer_INLINE, []byte("000"), int64(3), []byte("metadata")},
		{"test inline delete", "l/path/1", []byte("metadata"), time.Now(), createTestData(t, 2*memory.KiB.Int64())},
		{"test remote delete", "s0/test_bucket/mypath/1", []byte("metadata"), time.Now(), createTestData(t, 100*memory.KiB.Int64())},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		}
		runTest(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, segmentStore segments.Store) {
			_, err := segmentStore.Put(ctx, bytes.NewReader(tt.content), tt.expiration, func() (storj.Path, []byte, error) {
				return tt.path, tt.metadata, nil
			})
			require.NoError(t, err, tt.name)

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)
			_, _, err = segmentStore.Get(ctx, tt.path)
			require.NoError(t, err, tt.name)

		calls := []*gomock.Call{
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			).Return(&pb.Pointer{
				Type:           tt.pointerType,
				InlineSegment:  tt.inlineContent,
				CreationDate:   someTime,
				ExpirationDate: someTime,
				SegmentSize:    tt.size,
				Metadata:       tt.metadata,
			}, nil, nil, nil),
		}
		gomock.InOrder(calls...)
			// delete existing
			err = segmentStore.Delete(ctx, tt.path)
			require.NoError(t, err, tt.name)

		_, _, err := ss.Get(ctx, tt.pathInput)
		assert.NoError(t, err)
	}
}
			_, _, err = segmentStore.Get(ctx, tt.path)
			require.Error(t, err, tt.name)
			require.True(t, storage.ErrKeyNotFound.Has(err))

func TestSegmentStoreGetRemote(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ti := time.Unix(0, 0).UTC()
	someTime, err := ptypes.TimestampProto(ti)
	assert.NoError(t, err)

	for _, tt := range []struct {
		pathInput     string
		thresholdSize int
		pointerType   pb.Pointer_DataType
		size          int64
		metadata      []byte
	}{
		{"path/1/2/3", 10, pb.Pointer_REMOTE, int64(3), []byte("metadata")},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		}

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)

		calls := []*gomock.Call{
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			).Return(&pb.Pointer{
				Type: tt.pointerType,
				Remote: &pb.RemoteSegment{
					Redundancy: &pb.RedundancyScheme{
						Type:             pb.RedundancyScheme_RS,
						MinReq:           1,
						Total:            2,
						RepairThreshold:  1,
						SuccessThreshold: 2,
					},
					PieceId:      "here's my piece id",
					RemotePieces: []*pb.RemotePiece{},
				},
				CreationDate:   someTime,
				ExpirationDate: someTime,
				SegmentSize:    tt.size,
				Metadata:       tt.metadata,
			}, nil, nil, nil),
			mockOC.EXPECT().BulkLookup(gomock.Any(), gomock.Any()),
			mockEC.EXPECT().Get(
				gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
			),
		}
		gomock.InOrder(calls...)

		_, _, err := ss.Get(ctx, tt.pathInput)
		assert.NoError(t, err)
	}
}

func TestSegmentStoreDeleteInline(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ti := time.Unix(0, 0).UTC()
	someTime, err := ptypes.TimestampProto(ti)
	assert.NoError(t, err)

	for _, tt := range []struct {
		pathInput     string
		thresholdSize int
		pointerType   pb.Pointer_DataType
		inlineContent []byte
		size          int64
		metadata      []byte
	}{
		{"path/1/2/3", 10, pb.Pointer_INLINE, []byte("000"), int64(3), []byte("metadata")},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		}

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)

		calls := []*gomock.Call{
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			).Return(&pb.Pointer{
				Type:           tt.pointerType,
				InlineSegment:  tt.inlineContent,
				CreationDate:   someTime,
				ExpirationDate: someTime,
				SegmentSize:    tt.size,
				Metadata:       tt.metadata,
			}, nil, nil, nil),
			mockPDB.EXPECT().Delete(
				gomock.Any(), gomock.Any(),
			),
		}
		gomock.InOrder(calls...)

		err := ss.Delete(ctx, tt.pathInput)
		assert.NoError(t, err)
	}
}

func TestSegmentStoreDeleteRemote(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ti := time.Unix(0, 0).UTC()
	someTime, err := ptypes.TimestampProto(ti)
	assert.NoError(t, err)

	for _, tt := range []struct {
		pathInput     string
		thresholdSize int
		pointerType   pb.Pointer_DataType
		size          int64
		metadata      []byte
	}{
		{"path/1/2/3", 10, pb.Pointer_REMOTE, int64(3), []byte("metadata")},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		}

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)

		calls := []*gomock.Call{
			mockPDB.EXPECT().Get(
				gomock.Any(), gomock.Any(),
			).Return(&pb.Pointer{
				Type: tt.pointerType,
				Remote: &pb.RemoteSegment{
					Redundancy: &pb.RedundancyScheme{
						Type:             pb.RedundancyScheme_RS,
						MinReq:           1,
						Total:            2,
						RepairThreshold:  1,
						SuccessThreshold: 2,
					},
					PieceId:      "here's my piece id",
					RemotePieces: []*pb.RemotePiece{},
				},
				CreationDate:   someTime,
				ExpirationDate: someTime,
				SegmentSize:    tt.size,
				Metadata:       tt.metadata,
			}, nil, &pb.OrderLimit{}, nil),
			mockOC.EXPECT().BulkLookup(gomock.Any(), gomock.Any()),
			mockEC.EXPECT().Delete(
				gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
			),
			mockPDB.EXPECT().Delete(
				gomock.Any(), gomock.Any(),
			),
		}
		gomock.InOrder(calls...)

		err := ss.Delete(ctx, tt.pathInput)
		assert.NoError(t, err)
			// delete non existing
			err = segmentStore.Delete(ctx, tt.path)
			require.Error(t, err, tt.name)
			require.True(t, storage.ErrKeyNotFound.Has(err))
		})
	}
}

func TestSegmentStoreList(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	runTest(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, segmentStore segments.Store) {
		expiration := time.Now().Add(24 * time.Hour * 10)

	for _, tt := range []struct {
		prefix        string
		startAfter    string
		thresholdSize int
		itemPath      string
		inlineContent []byte
		metadata      []byte
	}{
		{"bucket1", "s0/path/1", 10, "s0/path/1", []byte("inline"), []byte("metadata")},
	} {
		mockOC := mock_overlay.NewMockClient(ctrl)
		mockEC := mock_ecclient.NewMockClient(ctrl)
		mockPDB := mock_pointerdb.NewMockClient(ctrl)
		mockES := mock_eestream.NewMockErasureScheme(ctrl)
		rs := eestream.RedundancyStrategy{
			ErasureScheme: mockES,
		segments := []struct {
			path    string
			content []byte
		}{
			{"l/AAAA/afile1", []byte("content")},
			{"l/AAAA/bfile2", []byte("content")},
			{"l/BBBB/afile1", []byte("content")},
			{"l/BBBB/bfile2", []byte("content")},
			{"l/BBBB/bfolder/file1", []byte("content")},
		}
		for _, segment := range segments {
			_, err := segmentStore.Put(ctx, bytes.NewReader(segment.content), expiration, func() (storj.Path, []byte, error) {
				return segment.path, []byte{}, nil
			})
			require.NoError(t, err)
		}

		ss := segmentStore{mockOC, mockEC, mockPDB, rs, tt.thresholdSize}
		assert.NotNil(t, ss)
		// should list all
		items, more, err := segmentStore.List(ctx, "l", "", "", true, 10, meta.None)
		require.NoError(t, err)
		require.False(t, more)
		require.Equal(t, len(segments), len(items))

		ti := time.Unix(0, 0).UTC()
		someTime, err := ptypes.TimestampProto(ti)
		assert.NoError(t, err)
		// should list first two and more = true
		items, more, err = segmentStore.List(ctx, "l", "", "", true, 2, meta.None)
		require.NoError(t, err)
		require.True(t, more)
		require.Equal(t, 2, len(items))

		calls := []*gomock.Call{
			mockPDB.EXPECT().List(
				gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
				gomock.Any(), gomock.Any(), gomock.Any(),
			).Return([]pdb.ListItem{
				{
					Path: tt.itemPath,
					Pointer: &pb.Pointer{
						Type:           pb.Pointer_INLINE,
						InlineSegment:  tt.inlineContent,
						CreationDate:   someTime,
						ExpirationDate: someTime,
						SegmentSize:    int64(4),
						Metadata:       tt.metadata,
					},
				},
			}, true, nil),
		}
		gomock.InOrder(calls...)
		// should list only prefixes
		items, more, err = segmentStore.List(ctx, "l", "", "", false, 10, meta.None)
		require.NoError(t, err)
		require.False(t, more)
		require.Equal(t, 2, len(items))

		_, _, err = ss.List(ctx, tt.prefix, tt.startAfter, "", false, 10, meta.Modified)
		assert.NoError(t, err)
	}
		// should list only BBBB bucket
		items, more, err = segmentStore.List(ctx, "l/BBBB", "", "", false, 10, meta.None)
		require.NoError(t, err)
		require.False(t, more)
		require.Equal(t, 3, len(items))

		// should list only BBBB bucket after afile1
		items, more, err = segmentStore.List(ctx, "l/BBBB", "afile1", "", false, 10, meta.None)
		require.NoError(t, err)
		require.False(t, more)
		require.Equal(t, 2, len(items))

		// should list nothing
		items, more, err = segmentStore.List(ctx, "l/CCCC", "", "", true, 10, meta.None)
		require.NoError(t, err)
		require.False(t, more)
		require.Equal(t, 0, len(items))
	})
}

func TestCalcNeededNodes(t *testing.T) {
@ -484,6 +226,51 @@ func TestCalcNeededNodes(t *testing.T) {
			Total:            tt.n,
		}

		assert.Equal(t, tt.needed, calcNeededNodes(&rs), tag)
		assert.Equal(t, tt.needed, segments.CalcNeededNodes(&rs), tag)
	}
}

func createTestData(t *testing.T, size int64) []byte {
	data, err := ioutil.ReadAll(io.LimitReader(rand.Reader, size))
	require.NoError(t, err)
	return data
}

func runTest(t *testing.T, test func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, segmentStore segments.Store)) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// TODO move apikey creation to testplanet
		project, err := planet.Satellites[0].DB.Console().Projects().Insert(context.Background(), &console.Project{
			Name: "testProject",
		})
		require.NoError(t, err)

		apiKey := console.APIKey{}
		apiKeyInfo := console.APIKeyInfo{
			ProjectID: project.ID,
			Name:      "testKey",
		}

		// add api key to db
		_, err = planet.Satellites[0].DB.Console().APIKeys().Create(context.Background(), apiKey, apiKeyInfo)
		require.NoError(t, err)

		TestAPIKey := apiKey.String()

		metainfo, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], TestAPIKey)
		require.NoError(t, err)

		ec := ecclient.NewClient(planet.Uplinks[0].Transport, 0)
		fc, err := infectious.NewFEC(2, 4)
		require.NoError(t, err)

		rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, 1*memory.KiB.Int()), 0, 0)
		require.NoError(t, err)

		segmentStore := segments.NewSegmentStore(metainfo, ec, rs, 4*memory.KiB.Int(), 8*memory.MiB.Int64())
		assert.NotNil(t, segmentStore)

		test(t, ctx, planet, segmentStore)
	})
}
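Any future test in this file can reuse runTest to get a live testplanet network plus a ready segment store. A minimal sketch (the test name is hypothetical):

```go
func TestSegmentStoreExample(t *testing.T) {
	runTest(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, segmentStore segments.Store) {
		// exercise segmentStore against the 1-satellite / 4-node / 1-uplink network
		_, _, err := segmentStore.Get(ctx, "l/bucket/does-not-exist")
		require.Error(t, err)
	})
}
```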
@ -93,9 +93,6 @@ type Segment struct {
	EncryptedKey EncryptedPrivateKey
}

// PieceID is the unique identifier for pieces
type PieceID []byte

// Piece is information where a piece is located
type Piece struct {
	Number byte
@ -7,6 +7,7 @@ import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha512"
	"database/sql/driver"
	"encoding/base32"

	"github.com/zeebo/errs"
@ -18,12 +19,12 @@ var ErrPieceID = errs.Class("piece ID error")
// base32Encoding without padding
var base32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)

// PieceID2 is the unique identifier for pieces
type PieceID2 [32]byte
// PieceID is the unique identifier for pieces
type PieceID [32]byte

// NewPieceID creates a piece ID
func NewPieceID() PieceID2 {
	var id PieceID2
func NewPieceID() PieceID {
	var id PieceID

	_, err := rand.Read(id[:])
	if err != nil {
@ -34,76 +35,76 @@ func NewPieceID() PieceID2 {
}

// PieceIDFromString decodes a hex encoded piece ID string
func PieceIDFromString(s string) (PieceID2, error) {
func PieceIDFromString(s string) (PieceID, error) {
	idBytes, err := base32Encoding.DecodeString(s)
	if err != nil {
		return PieceID2{}, ErrNodeID.Wrap(err)
		return PieceID{}, ErrNodeID.Wrap(err)
	}
	return PieceIDFromBytes(idBytes)
}

// PieceIDFromBytes converts a byte slice into a piece ID
func PieceIDFromBytes(b []byte) (PieceID2, error) {
	if len(b) != len(PieceID2{}) {
		return PieceID2{}, ErrPieceID.New("not enough bytes to make a piece ID; have %d, need %d", len(b), len(NodeID{}))
func PieceIDFromBytes(b []byte) (PieceID, error) {
	if len(b) != len(PieceID{}) {
		return PieceID{}, ErrPieceID.New("not enough bytes to make a piece ID; have %d, need %d", len(b), len(NodeID{}))
	}

	var id PieceID2
	var id PieceID
	copy(id[:], b[:])
	return id, nil
}

// IsZero returns whether piece ID is unassigned
func (id PieceID2) IsZero() bool {
	return id == PieceID2{}
func (id PieceID) IsZero() bool {
	return id == PieceID{}
}

// String representation of the piece ID
func (id PieceID2) String() string { return base32Encoding.EncodeToString(id.Bytes()) }
func (id PieceID) String() string { return base32Encoding.EncodeToString(id.Bytes()) }

// Bytes returns bytes of the piece ID
func (id PieceID2) Bytes() []byte { return id[:] }
func (id PieceID) Bytes() []byte { return id[:] }

// Derive a new PieceID2 from the current piece ID and the given storage node ID
func (id PieceID2) Derive(storagenodeID NodeID) PieceID2 {
// Derive a new PieceID from the current piece ID and the given storage node ID
func (id PieceID) Derive(storagenodeID NodeID) PieceID {
	// TODO: should the secret / content be swapped?
	mac := hmac.New(sha512.New, id.Bytes())
	_, _ = mac.Write(storagenodeID.Bytes()) // on hash.Hash write never returns an error
	var derived PieceID2
	var derived PieceID
	copy(derived[:], mac.Sum(nil))
	return derived
}

// Marshal serializes a piece ID
func (id PieceID2) Marshal() ([]byte, error) {
func (id PieceID) Marshal() ([]byte, error) {
	return id.Bytes(), nil
}

// MarshalTo serializes a piece ID into the passed byte slice
func (id *PieceID2) MarshalTo(data []byte) (n int, err error) {
func (id *PieceID) MarshalTo(data []byte) (n int, err error) {
	n = copy(data, id.Bytes())
	return n, nil
}

// Unmarshal deserializes a piece ID
func (id *PieceID2) Unmarshal(data []byte) error {
func (id *PieceID) Unmarshal(data []byte) error {
	var err error
	*id, err = PieceIDFromBytes(data)
	return err
}

// Size returns the length of a piece ID (implements gogo's custom type interface)
func (id *PieceID2) Size() int {
func (id *PieceID) Size() int {
	return len(id)
}

// MarshalJSON serializes a piece ID to a json string as bytes
func (id PieceID2) MarshalJSON() ([]byte, error) {
func (id PieceID) MarshalJSON() ([]byte, error) {
	return []byte(`"` + id.String() + `"`), nil
}

// UnmarshalJSON deserializes a json string (as bytes) to a piece ID
func (id *PieceID2) UnmarshalJSON(data []byte) error {
func (id *PieceID) UnmarshalJSON(data []byte) error {
	var err error
	*id, err = PieceIDFromString(string(data))
	if err != nil {
@ -111,3 +112,19 @@ func (id *PieceID2) UnmarshalJSON(data []byte) error {
	}
	return nil
}

// Value set a PieceID to a database field
func (id PieceID) Value() (driver.Value, error) {
	return id.Bytes(), nil
}

// Scan extracts a PieceID from a database field
func (id *PieceID) Scan(src interface{}) (err error) {
	b, ok := src.([]byte)
	if !ok {
		return ErrNodeID.New("PieceID Scan expects []byte")
	}
	n, err := PieceIDFromBytes(b)
	*id = n
	return err
}
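Because every storage node receives a distinct piece ID derived from the root piece ID, a standalone sketch of the Derive construction may be useful: HMAC-SHA512 keyed by the root ID, fed the node ID, truncated to the 32-byte ID size. The inputs below are made up.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha512"
	"fmt"
)

// derive reproduces the HMAC construction from PieceID.Derive above:
// key = root piece ID bytes, message = storage node ID bytes, and the
// 64-byte HMAC-SHA512 sum is truncated to the 32-byte ID size.
func derive(rootPieceID, nodeID []byte) [32]byte {
	mac := hmac.New(sha512.New, rootPieceID)
	_, _ = mac.Write(nodeID) // hash.Hash.Write never returns an error
	var out [32]byte
	copy(out[:], mac.Sum(nil))
	return out
}

func main() {
	root := []byte("0123456789abcdef0123456789abcdef") // hypothetical 32-byte root
	nodeA := []byte("node-a")                          // hypothetical node IDs
	nodeB := []byte("node-b")
	fmt.Printf("%x\n%x\n", derive(root, nodeA), derive(root, nodeB)) // distinct per node
}
```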
@ -17,6 +17,7 @@ func NewReaderSource(src func() ([]byte, error)) *ReaderSource {

// Read implements io.Reader
func (rs *ReaderSource) Read(p []byte) (n int, err error) {
	// TODO: bug, buffer should be fully read before returning an error
	if rs.err != nil {
		return 0, rs.err
	}
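The TODO above notes that Read should drain any buffered bytes before surfacing a stored error. A minimal sketch of that fix, assuming ReaderSource keeps `src`, `buffer`, and `err` fields as the surrounding code suggests (the field names are assumptions):

```go
type ReaderSource struct {
	src    func() ([]byte, error) // assumed fields, mirroring the surrounding code
	buffer []byte
	err    error
}

// Read serves buffered data first and only returns the remembered error
// once the buffer is empty; note a src that returns (nil, nil) forever
// would loop, so this is a sketch rather than a hardened implementation.
func (rs *ReaderSource) Read(p []byte) (n int, err error) {
	for len(rs.buffer) == 0 && rs.err == nil {
		rs.buffer, rs.err = rs.src() // fetch the next chunk
	}
	if len(rs.buffer) > 0 {
		n = copy(p, rs.buffer)
		rs.buffer = rs.buffer[n:]
		return n, nil
	}
	return 0, rs.err
}
```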
proto.lock
@ -558,6 +558,62 @@
    "protopath": "pkg:/:pb:/:inspector.proto",
    "def": {
      "messages": [
        {
          "name": "ListIrreparableSegmentsRequest",
          "fields": [
            {
              "id": 1,
              "name": "limit",
              "type": "int32"
            },
            {
              "id": 2,
              "name": "offset",
              "type": "int32"
            }
          ]
        },
        {
          "name": "IrreparableSegment",
          "fields": [
            {
              "id": 1,
              "name": "path",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "segment_detail",
              "type": "pointerdb.Pointer"
            },
            {
              "id": 3,
              "name": "lost_pieces",
              "type": "int32"
            },
            {
              "id": 4,
              "name": "last_repair_attempt",
              "type": "int64"
            },
            {
              "id": 5,
              "name": "repair_attempt_count",
              "type": "int64"
            }
          ]
        },
        {
          "name": "ListIrreparableSegmentsResponse",
          "fields": [
            {
              "id": 1,
              "name": "segments",
              "type": "IrreparableSegment",
              "is_repeated": true
            }
          ]
        },
        {
          "name": "GetStatsRequest",
          "fields": [
@ -964,7 +1020,17 @@
            {
              "id": 1,
              "name": "node_id",
              "type": "string"
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "NodeID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 2,
@ -1084,6 +1150,16 @@
              "out_type": "DashboardResponse"
            }
          ]
        },
        {
          "name": "IrreparableInspector",
          "rpcs": [
            {
              "name": "ListIrreparableSegments",
              "in_type": "ListIrreparableSegmentsRequest",
              "out_type": "ListIrreparableSegmentsResponse"
            }
          ]
        }
      ],
      "imports": [
@ -1093,6 +1169,9 @@
        {
          "path": "node.proto"
        },
        {
          "path": "pointerdb.proto"
        },
        {
          "path": "google/protobuf/duration.proto"
        }
@ -1132,6 +1211,362 @@
      }
    }
  },
  {
    "protopath": "pkg:/:pb:/:metainfo.proto",
    "def": {
      "messages": [
        {
          "name": "AddressedOrderLimit",
          "fields": [
            {
              "id": 1,
              "name": "limit",
              "type": "orders.OrderLimit2"
            },
            {
              "id": 2,
              "name": "storage_node_address",
              "type": "node.NodeAddress"
            }
          ]
        },
        {
          "name": "SegmentWriteRequest",
          "fields": [
            {
              "id": 1,
              "name": "bucket",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "path",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "segment",
              "type": "int64"
            },
            {
              "id": 4,
              "name": "redundancy",
              "type": "pointerdb.RedundancyScheme"
            },
            {
              "id": 5,
              "name": "max_encrypted_segment_size",
              "type": "int64"
            },
            {
              "id": 6,
              "name": "expiration",
              "type": "google.protobuf.Timestamp"
            }
          ]
        },
        {
          "name": "SegmentWriteResponse",
          "fields": [
            {
              "id": 1,
              "name": "addressed_limits",
              "type": "AddressedOrderLimit",
              "is_repeated": true
            },
            {
              "id": 2,
              "name": "root_piece_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "PieceID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            }
          ]
        },
        {
          "name": "SegmentCommitRequest",
          "fields": [
            {
              "id": 1,
              "name": "bucket",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "path",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "segment",
              "type": "int64"
            },
            {
              "id": 4,
              "name": "pointer",
              "type": "pointerdb.Pointer"
            },
            {
              "id": 5,
              "name": "original_limits",
              "type": "orders.OrderLimit2",
              "is_repeated": true
            }
          ]
        },
        {
          "name": "SegmentCommitResponse",
          "fields": [
            {
              "id": 1,
              "name": "pointer",
              "type": "pointerdb.Pointer"
            }
          ]
        },
        {
          "name": "SegmentDownloadRequest",
          "fields": [
            {
              "id": 1,
              "name": "bucket",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "path",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "segment",
              "type": "int64"
            }
          ]
        },
        {
          "name": "SegmentDownloadResponse",
          "fields": [
            {
              "id": 1,
              "name": "addressed_limits",
              "type": "AddressedOrderLimit",
              "is_repeated": true
            },
            {
              "id": 2,
              "name": "pointer",
              "type": "pointerdb.Pointer"
            }
          ]
        },
        {
          "name": "SegmentInfoRequest",
          "fields": [
            {
              "id": 1,
              "name": "bucket",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "path",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "segment",
              "type": "int64"
            }
          ]
        },
        {
          "name": "SegmentInfoResponse",
          "fields": [
            {
              "id": 2,
              "name": "pointer",
              "type": "pointerdb.Pointer"
            }
          ]
        },
        {
          "name": "SegmentDeleteRequest",
          "fields": [
            {
              "id": 1,
              "name": "bucket",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "path",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "segment",
              "type": "int64"
            }
          ]
        },
        {
          "name": "SegmentDeleteResponse",
          "fields": [
            {
              "id": 1,
              "name": "addressed_limits",
              "type": "AddressedOrderLimit",
              "is_repeated": true
            }
          ]
        },
        {
          "name": "ListSegmentsRequest",
          "fields": [
            {
              "id": 1,
              "name": "bucket",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "prefix",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "start_after",
              "type": "bytes"
            },
            {
              "id": 4,
              "name": "end_before",
              "type": "bytes"
            },
            {
              "id": 5,
              "name": "recursive",
              "type": "bool"
            },
            {
              "id": 6,
              "name": "limit",
              "type": "int32"
            },
            {
              "id": 7,
              "name": "meta_flags",
              "type": "fixed32"
            }
          ]
        },
        {
          "name": "ListSegmentsResponse",
          "fields": [
            {
              "id": 1,
              "name": "items",
              "type": "Item",
              "is_repeated": true
            },
            {
              "id": 2,
              "name": "more",
              "type": "bool"
            }
          ],
          "messages": [
            {
              "name": "Item",
              "fields": [
                {
                  "id": 1,
                  "name": "path",
                  "type": "bytes"
                },
                {
                  "id": 2,
                  "name": "pointer",
                  "type": "pointerdb.Pointer"
                },
                {
                  "id": 3,
                  "name": "is_prefix",
                  "type": "bool"
                }
              ]
            }
          ]
        }
      ],
      "services": [
        {
          "name": "Metainfo",
          "rpcs": [
            {
              "name": "CreateSegment",
              "in_type": "SegmentWriteRequest",
              "out_type": "SegmentWriteResponse"
            },
            {
              "name": "CommitSegment",
              "in_type": "SegmentCommitRequest",
              "out_type": "SegmentCommitResponse"
            },
            {
              "name": "SegmentInfo",
              "in_type": "SegmentInfoRequest",
              "out_type": "SegmentInfoResponse"
            },
            {
              "name": "DownloadSegment",
              "in_type": "SegmentDownloadRequest",
              "out_type": "SegmentDownloadResponse"
            },
            {
              "name": "DeleteSegment",
              "in_type": "SegmentDeleteRequest",
              "out_type": "SegmentDeleteResponse"
            },
            {
              "name": "ListSegments",
              "in_type": "ListSegmentsRequest",
              "out_type": "ListSegmentsResponse"
            }
          ]
        }
      ],
      "imports": [
        {
          "path": "gogo.proto"
        },
        {
          "path": "google/protobuf/timestamp.proto"
        },
        {
          "path": "node.proto"
        },
        {
          "path": "pointerdb.proto"
        },
        {
          "path": "orders.proto"
        }
      ],
      "package": {
        "name": "metainfo"
      }
    }
  },
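Read together, these messages define the new upload handshake: the uplink asks the satellite for order limits (CreateSegment), uploads pieces to the returned nodes, then commits the pointer (CommitSegment). A hedged client-side sketch against the generated pb client; the method and field names follow the definitions above, but the connection setup, the `-1` last-segment index, and the 64 MiB size cap are assumptions:

```go
package main

import (
	"context"

	"google.golang.org/grpc"

	"storj.io/storj/pkg/pb"
)

// uploadSegment is a sketch only: assumes conn is an authenticated
// *grpc.ClientConn to the satellite and rs is an agreed redundancy scheme.
func uploadSegment(ctx context.Context, conn *grpc.ClientConn, bucket, path []byte, rs *pb.RedundancyScheme) error {
	client := pb.NewMetainfoClient(conn)

	// 1. Ask the satellite for addressed order limits and a root piece ID.
	created, err := client.CreateSegment(ctx, &pb.SegmentWriteRequest{
		Bucket:                  bucket,
		Path:                    path,
		Segment:                 -1, // "l": last segment (assumed convention)
		Redundancy:              rs,
		MaxEncryptedSegmentSize: 64 << 20, // hypothetical cap
	})
	if err != nil {
		return err
	}

	// 2. Upload erasure-coded pieces using the piecestore protocol (omitted).
	for _, addressed := range created.GetAddressedLimits() {
		_ = addressed.StorageNodeAddress // dial the node and redeem addressed.Limit
	}

	// 3. Commit the assembled pointer so the satellite records the segment.
	_, err = client.CommitSegment(ctx, &pb.SegmentCommitRequest{
		Bucket:  bucket,
		Path:    path,
		Segment: -1,
		Pointer: &pb.Pointer{ /* filled from the upload results */ },
	})
	return err
}
```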
  {
    "protopath": "pkg:/:pb:/:node.proto",
    "def": {
@ -1387,6 +1822,203 @@
      }
    }
  },
  {
    "protopath": "pkg:/:pb:/:orders.proto",
    "def": {
      "enums": [
        {
          "name": "PieceAction",
          "enum_fields": [
            {
              "name": "INVALID"
            },
            {
              "name": "PUT",
              "integer": 1
            },
            {
              "name": "GET",
              "integer": 2
            },
            {
              "name": "GET_AUDIT",
              "integer": 3
            },
            {
              "name": "GET_REPAIR",
              "integer": 4
            },
            {
              "name": "PUT_REPAIR",
              "integer": 5
            },
            {
              "name": "DELETE",
              "integer": 6
            }
          ]
        }
      ],
      "messages": [
        {
          "name": "OrderLimit2",
          "fields": [
            {
              "id": 1,
              "name": "serial_number",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "satellite_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "NodeID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 3,
              "name": "uplink_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "NodeID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 4,
              "name": "storage_node_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "NodeID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 5,
              "name": "piece_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "PieceID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 6,
              "name": "limit",
              "type": "int64"
            },
            {
              "id": 7,
              "name": "action",
              "type": "PieceAction"
            },
            {
              "id": 8,
              "name": "piece_expiration",
              "type": "google.protobuf.Timestamp"
            },
            {
              "id": 9,
              "name": "order_expiration",
              "type": "google.protobuf.Timestamp"
            },
            {
              "id": 10,
              "name": "satellite_signature",
              "type": "bytes"
            }
          ]
        },
        {
          "name": "Order2",
          "fields": [
            {
              "id": 1,
              "name": "serial_number",
              "type": "bytes"
            },
            {
              "id": 2,
              "name": "amount",
              "type": "int64"
            },
            {
              "id": 3,
              "name": "uplink_signature",
              "type": "bytes"
            }
          ]
        },
        {
          "name": "PieceHash",
          "fields": [
            {
              "id": 1,
              "name": "piece_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "PieceID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 2,
              "name": "hash",
              "type": "bytes"
            },
            {
              "id": 3,
              "name": "signature",
              "type": "bytes"
            }
          ]
        }
      ],
      "imports": [
        {
          "path": "gogo.proto"
        },
        {
          "path": "google/protobuf/timestamp.proto"
        }
      ],
      "package": {
        "name": "orders"
      }
    }
  },
  {
    "protopath": "pkg:/:pb:/:overlay.proto",
    "def": {
@ -2194,6 +2826,178 @@
      }
    }
  },
  {
    "protopath": "pkg:/:pb:/:piecestore2.proto",
    "def": {
      "messages": [
        {
          "name": "PieceUploadRequest",
          "fields": [
            {
              "id": 1,
              "name": "limit",
              "type": "orders.OrderLimit2"
            },
            {
              "id": 2,
              "name": "order",
              "type": "orders.Order2"
            },
            {
              "id": 3,
              "name": "chunk",
              "type": "Chunk"
            },
            {
              "id": 4,
              "name": "done",
              "type": "orders.PieceHash"
            }
          ],
          "messages": [
            {
              "name": "Chunk",
              "fields": [
                {
                  "id": 1,
                  "name": "offset",
                  "type": "int64"
                },
                {
                  "id": 2,
                  "name": "data",
                  "type": "bytes"
                }
              ]
            }
          ]
        },
        {
          "name": "PieceUploadResponse",
          "fields": [
            {
              "id": 1,
              "name": "done",
              "type": "orders.PieceHash"
            }
          ]
        },
        {
          "name": "PieceDownloadRequest",
          "fields": [
            {
              "id": 1,
              "name": "limit",
              "type": "orders.OrderLimit2"
            },
            {
              "id": 2,
              "name": "order",
              "type": "orders.Order2"
            },
            {
              "id": 3,
              "name": "chunk",
              "type": "Chunk"
            }
          ],
          "messages": [
            {
              "name": "Chunk",
              "fields": [
                {
                  "id": 1,
                  "name": "offset",
                  "type": "int64"
                },
                {
                  "id": 2,
                  "name": "chunk_size",
                  "type": "int64"
                }
              ]
            }
          ]
        },
        {
          "name": "PieceDownloadResponse",
          "fields": [
            {
              "id": 1,
              "name": "chunk",
              "type": "Chunk"
            }
          ],
          "messages": [
            {
              "name": "Chunk",
              "fields": [
                {
                  "id": 1,
                  "name": "offset",
                  "type": "int64"
                },
                {
                  "id": 2,
                  "name": "data",
                  "type": "bytes"
                }
              ]
            }
          ]
        },
        {
          "name": "PieceDeleteRequest",
          "fields": [
            {
              "id": 1,
              "name": "limit",
              "type": "orders.OrderLimit2"
            }
          ]
        },
        {
          "name": "PieceDeleteResponse"
        }
      ],
      "services": [
        {
          "name": "Piecestore",
          "rpcs": [
            {
              "name": "Upload",
              "in_type": "PieceUploadRequest",
              "out_type": "PieceUploadResponse",
              "in_streamed": true
            },
            {
              "name": "Download",
              "in_type": "PieceDownloadRequest",
              "out_type": "PieceDownloadResponse",
              "in_streamed": true,
              "out_streamed": true
            },
            {
              "name": "Delete",
              "in_type": "PieceDeleteRequest",
              "out_type": "PieceDeleteResponse"
            }
          ]
        }
      ],
      "imports": [
        {
          "path": "gogo.proto"
        },
        {
          "path": "orders.proto"
        }
      ],
      "package": {
        "name": "piecestore"
      }
    }
  },
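The Piecestore service above is client-streamed for Upload: the first request carries the signed order limit, middle requests carry data chunks (each paired with an order for the bandwidth used), and the final one carries the uplink-signed piece hash. A rough sketch of that message sequence; the generated stream API names follow standard gRPC conventions and are assumptions:

```go
package main

import (
	"context"

	"google.golang.org/grpc"

	"storj.io/storj/pkg/pb"
)

// uploadPiece sketches the upload message sequence; error handling is
// trimmed and the single-chunk send stands in for a real chunking loop.
func uploadPiece(ctx context.Context, conn *grpc.ClientConn, limit *pb.OrderLimit2, data []byte) (*pb.PieceHash, error) {
	stream, err := pb.NewPiecestoreClient(conn).Upload(ctx)
	if err != nil {
		return nil, err
	}
	// 1. First message: just the order limit.
	if err := stream.Send(&pb.PieceUploadRequest{Limit: limit}); err != nil {
		return nil, err
	}
	// 2. Data chunks; a real client would also attach an Order2 per chunk.
	if err := stream.Send(&pb.PieceUploadRequest{
		Chunk: &pb.PieceUploadRequest_Chunk{Offset: 0, Data: data},
	}); err != nil {
		return nil, err
	}
	// 3. Final message: the signed hash; the node answers with its own.
	if err := stream.Send(&pb.PieceUploadRequest{
		Done: &pb.PieceHash{ /* piece id, hash, signature */ },
	}); err != nil {
		return nil, err
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		return nil, err
	}
	return resp.Done, nil
}
```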
  {
    "protopath": "pkg:/:pb:/:pointerdb.proto",
    "def": {
@ -2281,7 +3085,7 @@
            {
              "id": 3,
              "name": "hash",
              "type": "piecestoreroutes.SignedHash"
              "type": "orders.PieceHash"
            }
          ]
        },
@ -2295,8 +3099,18 @@
            },
            {
              "id": 2,
              "name": "piece_id",
              "type": "string"
              "name": "root_piece_id",
              "type": "bytes",
              "options": [
                {
                  "name": "(gogoproto.customtype)",
                  "value": "PieceID"
                },
                {
                  "name": "(gogoproto.nullable)",
                  "value": "false"
                }
              ]
            },
            {
              "id": 3,
@ -2581,6 +3395,9 @@
        },
        {
          "path": "piecestore.proto"
        },
        {
          "path": "orders.proto"
        }
      ],
      "package": {
satellite/metainfo/metainfo.go (new file, 518 lines)
@ -0,0 +1,518 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo

import (
	"bytes"
	"context"
	"strconv"

	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/skyrings/skyring-common/tools/uuid"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/pkg/auth"
	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/pointerdb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite/console"
	"storj.io/storj/storage"
)

var (
	mon = monkit.Package()
	// Error general metainfo error
	Error = errs.Class("metainfo error")
)

// APIKeys is the api key store used by the endpoint
type APIKeys interface {
	GetByKey(ctx context.Context, key console.APIKey) (*console.APIKeyInfo, error)
}

// Endpoint metainfo endpoint
type Endpoint struct {
	log                  *zap.Logger
	pointerdb            *pointerdb.Service
	allocation           *pointerdb.AllocationSigner
	cache                *overlay.Cache
	apiKeys              APIKeys
	selectionPreferences *overlay.NodeSelectionConfig
	signer               signing.Signer
}

// NewEndpoint creates new metainfo endpoint instance
func NewEndpoint(log *zap.Logger, pointerdb *pointerdb.Service, allocation *pointerdb.AllocationSigner, cache *overlay.Cache, apiKeys APIKeys, signer signing.Signer, selectionPreferences *overlay.NodeSelectionConfig) *Endpoint {
	// TODO do something with too many params
	return &Endpoint{
		log:                  log,
		pointerdb:            pointerdb,
		allocation:           allocation,
		cache:                cache,
		apiKeys:              apiKeys,
		selectionPreferences: selectionPreferences,
		signer:               signer,
	}
}

// Close closes resources
func (endpoint *Endpoint) Close() error { return nil }

func (endpoint *Endpoint) validateAuth(ctx context.Context) (*console.APIKeyInfo, error) {
	APIKey, ok := auth.GetAPIKey(ctx)
	if !ok {
		endpoint.log.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, "Invalid API credential")))
		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential")
	}

	key, err := console.APIKeyFromBase64(string(APIKey))
	if err != nil {
		endpoint.log.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, "Invalid API credential")))
		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential")
	}

	keyInfo, err := endpoint.apiKeys.GetByKey(ctx, *key)
	if err != nil {
		endpoint.log.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, err.Error())))
		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential")
	}

	return keyInfo, nil
}

// SegmentInfo returns segment metadata info
func (endpoint *Endpoint) SegmentInfo(ctx context.Context, req *pb.SegmentInfoRequest) (resp *pb.SegmentInfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	keyInfo, err := endpoint.validateAuth(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unauthenticated, err.Error())
	}

	err = endpoint.validateBucket(req.Bucket)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, err.Error())
	}

	path, err := endpoint.createPath(keyInfo.ProjectID, req.Segment, req.Bucket, req.Path)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, err.Error())
	}

	// TODO refactor to use []byte directly
	pointer, err := endpoint.pointerdb.Get(path)
	if err != nil {
		if storage.ErrKeyNotFound.Has(err) {
			return nil, status.Errorf(codes.NotFound, err.Error())
		}
		return nil, status.Errorf(codes.Internal, err.Error())
	}

	return &pb.SegmentInfoResponse{Pointer: pointer}, nil
}

// CreateSegment will generate the requested number of OrderLimits with corresponding node addresses for them
func (endpoint *Endpoint) CreateSegment(ctx context.Context, req *pb.SegmentWriteRequest) (resp *pb.SegmentWriteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = endpoint.validateAuth(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unauthenticated, err.Error())
	}

	redundancy, err := eestream.NewRedundancyStrategyFromProto(req.GetRedundancy())
	if err != nil {
		return nil, err
	}

	maxPieceSize := eestream.CalcPieceSize(req.GetMaxEncryptedSegmentSize(), redundancy)

	request := &pb.FindStorageNodesRequest{
		Opts: &pb.OverlayOptions{
			Amount: int64(req.Redundancy.Total),
			Restrictions: &pb.NodeRestrictions{
				FreeBandwidth: maxPieceSize,
				FreeDisk:      maxPieceSize,
			},
		},
	}
	nodes, err := endpoint.cache.FindStorageNodes(ctx, request, endpoint.selectionPreferences)
	if err != nil {
		return nil, status.Errorf(codes.Internal, err.Error())
	}

	uplinkIdentity, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, err.Error())
	}

	rootPieceID := storj.NewPieceID()
	limits := make([]*pb.AddressedOrderLimit, len(nodes))
	for i, node := range nodes {
		derivedPieceID := rootPieceID.Derive(node.Id)
		orderLimit, err := endpoint.createOrderLimit(ctx, uplinkIdentity, node.Id, derivedPieceID, req.Expiration, maxPieceSize, pb.PieceAction_PUT)
		if err != nil {
			return nil, status.Errorf(codes.Internal, err.Error())
		}

		limits[i] = &pb.AddressedOrderLimit{
			Limit:              orderLimit,
			StorageNodeAddress: node.Address,
		}
	}

	return &pb.SegmentWriteResponse{AddressedLimits: limits, RootPieceId: rootPieceID}, nil
}
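CreateSegment sizes its node request from eestream.CalcPieceSize. A small sketch of computing that bound for a hypothetical 2-of-4 scheme with 1 KiB erasure shares, built the same way runTest in the test file builds its strategy:

```go
package main

import (
	"fmt"

	"github.com/vivint/infectious"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/eestream"
)

func main() {
	// Hypothetical 2-of-4 scheme with 1 KiB erasure shares, mirroring runTest.
	fc, err := infectious.NewFEC(2, 4)
	if err != nil {
		panic(err)
	}
	rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, memory.KiB.Int()), 0, 0)
	if err != nil {
		panic(err)
	}
	// Upper bound on the piece each node must be able to store for a 64 MiB segment.
	fmt.Println(eestream.CalcPieceSize(64*memory.MiB.Int64(), rs))
}
```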
// CommitSegment commits segment metadata
|
||||
func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentCommitRequest) (resp *pb.SegmentCommitResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
keyInfo, err := endpoint.validateAuth(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.validateBucket(req.Bucket)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.validateCommit(req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
// err = endpoint.filterValidPieces(req.Pointer)
|
||||
// if err != nil {
|
||||
// return nil, status.Errorf(codes.Internal, err.Error())
|
||||
// }
|
||||
|
||||
path, err := endpoint.createPath(keyInfo.ProjectID, req.Segment, req.Bucket, req.Path)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
err = endpoint.pointerdb.Put(path, req.Pointer)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
pointer, err := endpoint.pointerdb.Get(path)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &pb.SegmentCommitResponse{Pointer: pointer}, nil
|
||||
}
|
||||
|
||||
// DownloadSegment gets the Pointer in case of INLINE data, or the list of OrderLimits necessary to download remote data
func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error) {
    defer mon.Task()(&ctx)(&err)

    keyInfo, err := endpoint.validateAuth(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Unauthenticated, err.Error())
    }

    err = endpoint.validateBucket(req.Bucket)
    if err != nil {
        return nil, status.Errorf(codes.InvalidArgument, err.Error())
    }

    path, err := endpoint.createPath(keyInfo.ProjectID, req.Segment, req.Bucket, req.Path)
    if err != nil {
        return nil, status.Errorf(codes.InvalidArgument, err.Error())
    }

    // TODO refactor to use []byte directly
    pointer, err := endpoint.pointerdb.Get(path)
    if err != nil {
        if storage.ErrKeyNotFound.Has(err) {
            return nil, status.Errorf(codes.NotFound, err.Error())
        }
        return nil, status.Errorf(codes.Internal, err.Error())
    }

    if pointer.Type == pb.Pointer_INLINE {
        return &pb.SegmentDownloadResponse{Pointer: pointer}, nil
    } else if pointer.Type == pb.Pointer_REMOTE && pointer.Remote != nil {
        limits, err := endpoint.createOrderLimitsForSegment(ctx, pointer, pb.PieceAction_GET)
        if err != nil {
            return nil, status.Errorf(codes.Internal, err.Error())
        }

        return &pb.SegmentDownloadResponse{Pointer: pointer, AddressedLimits: limits}, nil
    }

    return &pb.SegmentDownloadResponse{}, nil
}
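Since the response shape differs by pointer type, a client has to branch: inline segments carry the payload in the pointer itself, while remote segments only carry the signed order limits. A hedged sketch of the consuming side, using the request fields from the endpoint above (metainfoClient and downloadPieces are hypothetical stand-ins for the uplink's real plumbing):

    // Hypothetical client-side handling; not part of this commit.
    func fetchSegment(ctx context.Context, bucket, path []byte, segmentIndex int64) ([]byte, error) {
        resp, err := metainfoClient.DownloadSegment(ctx, &pb.SegmentDownloadRequest{
            Bucket:  bucket,
            Path:    path,
            Segment: segmentIndex,
        })
        if err != nil {
            return nil, err
        }
        if resp.Pointer.Type == pb.Pointer_INLINE {
            // inline segments travel inside the pointer, no storage node involved
            return resp.Pointer.InlineSegment, nil
        }
        // remote segments: redeem each AddressedOrderLimit against its storage node
        return downloadPieces(ctx, resp.Pointer, resp.AddressedLimits)
    }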
// DeleteSegment deletes segment metadata from the satellite and returns an array of OrderLimits so the pieces can be removed from the storage nodes
func (endpoint *Endpoint) DeleteSegment(ctx context.Context, req *pb.SegmentDeleteRequest) (resp *pb.SegmentDeleteResponse, err error) {
    defer mon.Task()(&ctx)(&err)

    keyInfo, err := endpoint.validateAuth(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Unauthenticated, err.Error())
    }

    err = endpoint.validateBucket(req.Bucket)
    if err != nil {
        return nil, status.Errorf(codes.InvalidArgument, err.Error())
    }

    path, err := endpoint.createPath(keyInfo.ProjectID, req.Segment, req.Bucket, req.Path)
    if err != nil {
        return nil, status.Errorf(codes.InvalidArgument, err.Error())
    }

    // TODO refactor to use []byte directly
    pointer, err := endpoint.pointerdb.Get(path)
    if err != nil {
        if storage.ErrKeyNotFound.Has(err) {
            return nil, status.Errorf(codes.NotFound, err.Error())
        }
        return nil, status.Errorf(codes.Internal, err.Error())
    }

    err = endpoint.pointerdb.Delete(path)
    if err != nil {
        return nil, status.Errorf(codes.Internal, err.Error())
    }

    if pointer.Type == pb.Pointer_REMOTE && pointer.Remote != nil {
        limits, err := endpoint.createOrderLimitsForSegment(ctx, pointer, pb.PieceAction_DELETE)
        if err != nil {
            return nil, status.Errorf(codes.Internal, err.Error())
        }
        return &pb.SegmentDeleteResponse{AddressedLimits: limits}, nil
    }

    return &pb.SegmentDeleteResponse{}, nil
}
func (endpoint *Endpoint) createOrderLimitsForSegment(ctx context.Context, pointer *pb.Pointer, action pb.PieceAction) ([]*pb.AddressedOrderLimit, error) {
    if pointer.GetRemote() == nil {
        return nil, nil
    }

    uplinkIdentity, err := identity.PeerIdentityFromContext(ctx)
    if err != nil {
        return nil, err
    }

    rootPieceID := pointer.GetRemote().RootPieceId

    redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
    if err != nil {
        return nil, err
    }

    pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
    expiration := pointer.ExpirationDate

    var limits []*pb.AddressedOrderLimit
    for _, piece := range pointer.GetRemote().GetRemotePieces() {
        derivedPieceID := rootPieceID.Derive(piece.NodeId)
        orderLimit, err := endpoint.createOrderLimit(ctx, uplinkIdentity, piece.NodeId, derivedPieceID, expiration, pieceSize, action)
        if err != nil {
            return nil, err
        }

        node, err := endpoint.cache.Get(ctx, piece.NodeId)
        if err != nil {
            return nil, err
        }

        if node != nil {
            node.Type.DPanicOnInvalid("metainfo server order limits")
        }

        limits = append(limits, &pb.AddressedOrderLimit{
            Limit:              orderLimit,
            StorageNodeAddress: node.Address,
        })
    }
    return limits, nil
}
func (endpoint *Endpoint) createOrderLimit(ctx context.Context, uplinkIdentity *identity.PeerIdentity, nodeID storj.NodeID, pieceID storj.PieceID, expiration *timestamp.Timestamp, limit int64, action pb.PieceAction) (*pb.OrderLimit2, error) {
    parameters := pointerdb.OrderLimitParameters{
        UplinkIdentity:  uplinkIdentity,
        StorageNodeID:   nodeID,
        PieceID:         pieceID,
        Action:          action,
        PieceExpiration: expiration,
        Limit:           limit,
    }

    orderLimit, err := endpoint.allocation.OrderLimit(ctx, parameters)
    if err != nil {
        return nil, err
    }

    orderLimit, err = signing.SignOrderLimit(endpoint.signer, orderLimit)
    if err != nil {
        return nil, err
    }

    return orderLimit, nil
}
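The signing step is what lets storage nodes act on these limits without calling back to the satellite: each node checks the satellite's signature before honoring an order. A short sketch of the verifying side, reusing the signing helpers already imported here (satelliteSignee is assumed to come from the storage node's trusted-satellite pool):

    // satelliteSignee is an assumption: it would be looked up from the
    // storage node's trust pool and exposes the satellite's public key.
    if err := signing.VerifyOrderLimitSignature(satelliteSignee, limit); err != nil {
        return Error.New("order limit has an invalid signature: %v", err)
    }
    // only now is it safe to act on limit.Action, up to limit.Limit bytes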
// ListSegments returns all Path keys in the Pointers bucket
func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.ListSegmentsRequest) (resp *pb.ListSegmentsResponse, err error) {
    defer mon.Task()(&ctx)(&err)

    keyInfo, err := endpoint.validateAuth(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Unauthenticated, err.Error())
    }

    prefix, err := endpoint.createPath(keyInfo.ProjectID, -1, req.Bucket, req.Prefix)
    if err != nil {
        return nil, status.Errorf(codes.InvalidArgument, err.Error())
    }

    items, more, err := endpoint.pointerdb.List(prefix, string(req.StartAfter), string(req.EndBefore), req.Recursive, req.Limit, req.MetaFlags)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "ListV2: %v", err)
    }

    segmentItems := make([]*pb.ListSegmentsResponse_Item, len(items))
    for i, item := range items {
        segmentItems[i] = &pb.ListSegmentsResponse_Item{
            Path:     []byte(item.Path),
            Pointer:  item.Pointer,
            IsPrefix: item.IsPrefix,
        }
    }

    return &pb.ListSegmentsResponse{Items: segmentItems, More: more}, nil
}
func (endpoint *Endpoint) createPath(projectID uuid.UUID, segmentIndex int64, bucket, path []byte) (string, error) {
    if segmentIndex < -1 {
        return "", Error.New("invalid segment index")
    }
    segment := "l"
    if segmentIndex > -1 {
        segment = "s" + strconv.FormatInt(segmentIndex, 10)
    }

    entries := make([]string, 0)
    entries = append(entries, projectID.String())
    entries = append(entries, segment)
    if len(bucket) != 0 {
        entries = append(entries, string(bucket))
    }
    if len(path) != 0 {
        entries = append(entries, string(path))
    }
    return storj.JoinPaths(entries...), nil
}
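For concreteness, the keys this helper produces look as follows (hypothetical project ID, bucket, and object path; the UUID is shortened for display):

    // createPath(projectID, -1, []byte("photos"), []byte("summer/cat.jpg"))
    //   => "59d7…/l/photos/summer/cat.jpg"    // "l" marks the last segment
    // createPath(projectID, 0, []byte("photos"), []byte("summer/cat.jpg"))
    //   => "59d7…/s0/photos/summer/cat.jpg"   // numbered segments use "sN"
    // createPath(projectID, -1, []byte("photos"), nil)
    //   => "59d7…/l/photos"                   // prefix used by ListSegments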
func (endpoint *Endpoint) filterValidPieces(pointer *pb.Pointer) error {
    if pointer.Type == pb.Pointer_REMOTE {
        var remotePieces []*pb.RemotePiece
        remote := pointer.Remote
        for _, piece := range remote.RemotePieces {
            // TODO enable verification

            // err := auth.VerifyMsg(piece.Hash, piece.NodeId)
            // if err == nil {
            //     // set to nil after verification to avoid storing in DB
            //     piece.Hash = nil
            //     remotePieces = append(remotePieces, piece)
            // } else {
            //     // TODO satellite should send Delete request for piece that failed
            //     s.logger.Warn("unable to verify piece hash: %v", zap.Error(err))
            // }

            remotePieces = append(remotePieces, piece)
        }

        if int32(len(remotePieces)) < remote.Redundancy.SuccessThreshold {
            return Error.New("number of valid pieces is lower than the success threshold: %v < %v",
                len(remotePieces),
                remote.Redundancy.SuccessThreshold,
            )
        }

        remote.RemotePieces = remotePieces
    }
    return nil
}
func (endpoint *Endpoint) validateBucket(bucket []byte) error {
    if len(bucket) == 0 {
        return errs.New("bucket not specified")
    }
    return nil
}
func (endpoint *Endpoint) validateCommit(req *pb.SegmentCommitRequest) error {
    err := endpoint.validatePointer(req.Pointer)
    if err != nil {
        return err
    }

    if req.Pointer.Type == pb.Pointer_REMOTE {
        remote := req.Pointer.Remote

        if int32(len(req.OriginalLimits)) != remote.Redundancy.Total {
            return Error.New("invalid number of order limits: %v != %v", len(req.OriginalLimits), remote.Redundancy.Total)
        }

        for _, piece := range remote.RemotePieces {
            limit := req.OriginalLimits[piece.PieceNum]
            if limit == nil {
                // the nil check must come before signature verification,
                // otherwise a missing limit would be dereferenced below
                return Error.New("no order limit for piece")
            }

            err := signing.VerifyOrderLimitSignature(endpoint.signer, limit)
            if err != nil {
                return err
            }

            derivedPieceID := remote.RootPieceId.Derive(piece.NodeId)
            if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
                return Error.New("invalid order limit piece id")
            }
            if !bytes.Equal(piece.NodeId.Bytes(), limit.StorageNodeId.Bytes()) {
                return Error.New("piece NodeID != order limit NodeID")
            }
        }
    }
    return nil
}
func (endpoint *Endpoint) validatePointer(pointer *pb.Pointer) error {
    if pointer == nil {
        return Error.New("no pointer specified")
    }

    // TODO does it all?
    if pointer.Type == pb.Pointer_REMOTE {
        if pointer.Remote == nil {
            return Error.New("no remote segment specified")
        }
        if pointer.Remote.RemotePieces == nil {
            return Error.New("no remote segment pieces specified")
        }
        if pointer.Remote.Redundancy == nil {
            return Error.New("no redundancy scheme specified")
        }
    }
    return nil
}
@ -25,6 +25,7 @@ import (
    "storj.io/storj/pkg/accounting/tally"
    "storj.io/storj/pkg/audit"
    "storj.io/storj/pkg/auth/grpcauth"
    "storj.io/storj/pkg/auth/signing"
    "storj.io/storj/pkg/bwagreement"
    "storj.io/storj/pkg/certdb"
    "storj.io/storj/pkg/datarepair/checker"
@ -47,6 +48,7 @@ import (
    "storj.io/storj/satellite/console/consoleweb"
    "storj.io/storj/satellite/mailservice"
    "storj.io/storj/satellite/mailservice/simulate"
    "storj.io/storj/satellite/metainfo"
    "storj.io/storj/storage"
    "storj.io/storj/storage/boltdb"
    "storj.io/storj/storage/storelogger"
@ -147,6 +149,7 @@ type Peer struct {
        Allocation *pointerdb.AllocationSigner
        Service    *pointerdb.Service
        Endpoint   *pointerdb.Server
        Endpoint2  *metainfo.Endpoint
    }

    Agreements struct {
@ -313,7 +316,29 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config) (*
            config.PointerDB,
            peer.Identity, peer.DB.Console().APIKeys())

        // TODO remove duplicated code
        overlayConfig := config.Overlay
        nodeSelectionConfig := &overlay.NodeSelectionConfig{
            UptimeCount:           overlayConfig.Node.UptimeCount,
            UptimeRatio:           overlayConfig.Node.UptimeRatio,
            AuditSuccessRatio:     overlayConfig.Node.AuditSuccessRatio,
            AuditCount:            overlayConfig.Node.AuditCount,
            NewNodeAuditThreshold: overlayConfig.Node.NewNodeAuditThreshold,
            NewNodePercentage:     overlayConfig.Node.NewNodePercentage,
        }

        peer.Metainfo.Endpoint2 = metainfo.NewEndpoint(
            peer.Log.Named("metainfo:endpoint"),
            peer.Metainfo.Service,
            peer.Metainfo.Allocation,
            peer.Overlay.Service,
            peer.DB.Console().APIKeys(),
            signing.SignerFromFullIdentity(peer.Identity),
            nodeSelectionConfig)

        pb.RegisterPointerDBServer(peer.Server.GRPC(), peer.Metainfo.Endpoint)
        pb.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)
    }

    { // setup agreements
@ -333,13 +358,30 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config) (*
            0, peer.Log.Named("checker"),
            config.Checker.Interval)

-       if config.Repairer.OverlayAddr == "" {
-           config.Repairer.OverlayAddr = peer.Addr()
-       }
-       if config.Repairer.PointerDBAddr == "" {
-           config.Repairer.PointerDBAddr = peer.Addr()
-       }
-       peer.Repair.Repairer = repairer.NewService(peer.DB.RepairQueue(), &config.Repairer, peer.Transport, config.Repairer.Interval, config.Repairer.MaxRepair)
+       // TODO remove duplicated code
+       overlayConfig := config.Overlay
+       nodeSelectionConfig := &overlay.NodeSelectionConfig{
+           UptimeCount:           overlayConfig.Node.UptimeCount,
+           UptimeRatio:           overlayConfig.Node.UptimeRatio,
+           AuditSuccessRatio:     overlayConfig.Node.AuditSuccessRatio,
+           AuditCount:            overlayConfig.Node.AuditCount,
+           NewNodeAuditThreshold: overlayConfig.Node.NewNodeAuditThreshold,
+           NewNodePercentage:     overlayConfig.Node.NewNodePercentage,
+       }
+
+       peer.Repair.Repairer = repairer.NewService(
+           peer.DB.RepairQueue(),
+           &config.Repairer,
+           config.Repairer.Interval,
+           config.Repairer.MaxRepair,
+           peer.Transport,
+           peer.Metainfo.Service,
+           peer.Metainfo.Allocation,
+           peer.Overlay.Service,
+           signing.SignerFromFullIdentity(peer.Identity),
+           nodeSelectionConfig,
+       )

        peer.Repair.Inspector = irreparable.NewInspector(peer.DB.Irreparable())
        pb.RegisterIrreparableInspectorServer(peer.Server.PrivateGRPC(), peer.Repair.Inspector)
    }
@ -61,4 +61,4 @@ then
    echo "multipart-upload-testfile file matches uploaded file";
else
    echo "multipart-upload-testfile file does not match uploaded file";
fi
fi
@ -57,4 +57,15 @@ then
    echo "multipart upload testfile matches uploaded file"
else
    echo "multipart upload testfile does not match uploaded file"
fi
fi

# check if all data files were removed
# FILES=$(find "$STORAGENODE_0_DIR/../" -type f -path "*/blob/*" ! -name "info.*")
# if [ -z "$FILES" ];
# then
#     echo "all data files removed from storage nodes"
# else
#     echo "not all data files removed from storage nodes:"
#     echo $FILES
#     exit 1
# fi
@ -54,4 +54,6 @@ type Blobs interface {
    Open(ctx context.Context, ref BlobRef) (BlobReader, error)
    // Delete deletes the blob with the namespace and key
    Delete(ctx context.Context, ref BlobRef) error
    // FreeSpace returns how much free space is left for writing
    FreeSpace() (int64, error)
}
@ -147,7 +147,11 @@ func (dir *Dir) Open(ref storage.BlobRef) (*os.File, error) {
    if err != nil {
        return nil, err
    }
-   return openFileReadOnly(path, blobPermission)
+   file, err := openFileReadOnly(path, blobPermission)
+   if err != nil {
+       return nil, Error.New("unable to open %q: %v", path, err)
+   }
+   return file, nil
}

// Delete deletes file with the specified ref
@ -255,5 +259,9 @@ type DiskInfo struct {

// Info returns information about the current state of the dir
func (dir *Dir) Info() (DiskInfo, error) {
-   return diskInfoFromPath(dir.path)
+   path, err := filepath.Abs(dir.path)
+   if err != nil {
+       return DiskInfo{}, err
+   }
+   return diskInfoFromPath(path)
}
@ -97,16 +97,26 @@ func ignoreSuccess(err error) error {
    return err
}

+// tryFixLongPath adds the `\\?` prefix to ensure that the API recognizes it as a long path.
+// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func tryFixLongPath(path string) string {
+   abspath, err := filepath.Abs(path)
+   if err != nil {
+       return path
+   }
+   return `\\?\` + abspath
+}
+
// rename implements atomic file rename on windows
func rename(oldpath, newpath string) error {
    const replace_existing = 0x1
    const write_through = 0x8

-   oldpathp, err := windows.UTF16PtrFromString(oldpath)
+   oldpathp, err := windows.UTF16PtrFromString(tryFixLongPath(oldpath))
    if err != nil {
        return &os.LinkError{Op: "replace", Old: oldpath, New: newpath, Err: err}
    }
-   newpathp, err := windows.UTF16PtrFromString(newpath)
+   newpathp, err := windows.UTF16PtrFromString(tryFixLongPath(newpath))
    if err != nil {
        return &os.LinkError{Op: "replace", Old: oldpath, New: newpath, Err: err}
    }
@ -122,9 +132,7 @@ func rename(oldpath, newpath string) error {
// openFileReadOnly opens the file with read only
// a custom implementation, because os.Open doesn't support specifying FILE_SHARE_DELETE
func openFileReadOnly(path string, perm os.FileMode) (*os.File, error) {
-   // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
-   longpath := `\\?\` + path
-   pathp, err := windows.UTF16PtrFromString(longpath)
+   pathp, err := windows.UTF16PtrFromString(tryFixLongPath(path))
    if err != nil {
        return nil, err
    }
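A worked example of the transformation, as a hedged test sketch (Windows-only, since these helpers live behind a windows build tag; the drive letter and directories are made up):

    func TestTryFixLongPath(t *testing.T) {
        // filepath.Abs leaves an already-absolute path untouched, so the
        // helper only prepends the long-path marker here.
        got := tryFixLongPath(`C:\storage\blobs\v0\ab\cdef`)
        want := `\\?\C:\storage\blobs\v0\ab\cdef`
        if got != want {
            t.Fatalf("got %q, want %q", got, want)
        }
    }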
@ -36,6 +36,9 @@ func NewAt(path string) (*Store, error) {
    return &Store{dir}, nil
}

// Close closes the store.
func (store *Store) Close() error { return nil }

// Open loads blob with the specified hash
func (store *Store) Open(ctx context.Context, ref storage.BlobRef) (storage.BlobReader, error) {
    file, openErr := store.dir.Open(ref)
@ -69,3 +72,12 @@ func (store *Store) Create(ctx context.Context, ref storage.BlobRef, size int64)
    }
    return newBlobWriter(ref, store, file), nil
}

// FreeSpace returns how much space is left in the underlying directory
func (store *Store) FreeSpace() (int64, error) {
    info, err := store.dir.Info()
    if err != nil {
        return 0, err
    }
    return info.AvailableSpace, nil
}
storagenode/bandwidth/db_test.go (new file, 101 lines)
@ -0,0 +1,101 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package bandwidth_test

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "storj.io/storj/internal/testcontext"
    "storj.io/storj/internal/testplanet"
    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/storj"
    "storj.io/storj/storagenode"
    "storj.io/storj/storagenode/bandwidth"
    "storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)

func TestDB(t *testing.T) {
    storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
        ctx := testcontext.New(t)
        defer ctx.Cleanup()

        bandwidthdb := db.Bandwidth()

        satellite0 := testplanet.MustPregeneratedSignedIdentity(0).ID
        satellite1 := testplanet.MustPregeneratedSignedIdentity(1).ID

        now := time.Now()

        // ensure zero queries work
        usage, err := bandwidthdb.Summary(ctx, now, now)
        require.NoError(t, err)
        require.Equal(t, &bandwidth.Usage{}, usage)

        usageBySatellite, err := bandwidthdb.SummaryBySatellite(ctx, now, now)
        require.NoError(t, err)
        require.Equal(t, map[storj.NodeID]*bandwidth.Usage{}, usageBySatellite)

        actions := []pb.PieceAction{
            pb.PieceAction_INVALID,

            pb.PieceAction_PUT,
            pb.PieceAction_GET,
            pb.PieceAction_GET_AUDIT,
            pb.PieceAction_GET_REPAIR,
            pb.PieceAction_PUT_REPAIR,
            pb.PieceAction_DELETE,

            pb.PieceAction_PUT,
            pb.PieceAction_GET,
            pb.PieceAction_GET_AUDIT,
            pb.PieceAction_GET_REPAIR,
            pb.PieceAction_PUT_REPAIR,
            pb.PieceAction_DELETE,
        }

        expectedUsage := &bandwidth.Usage{}
        expectedUsageTotal := &bandwidth.Usage{}

        // add bandwidth usages
        for _, action := range actions {
            expectedUsage.Include(action, int64(action))
            expectedUsageTotal.Include(action, int64(2*action))

            err := bandwidthdb.Add(ctx, satellite0, action, int64(action), now)
            require.NoError(t, err)

            err = bandwidthdb.Add(ctx, satellite1, action, int64(action), now.Add(2*time.Hour))
            require.NoError(t, err)
        }

        // test summarizing
        usage, err = bandwidthdb.Summary(ctx, now.Add(-10*time.Hour), now.Add(10*time.Hour))
        require.NoError(t, err)
        require.Equal(t, expectedUsageTotal, usage)

        expectedUsageBySatellite := map[storj.NodeID]*bandwidth.Usage{
            satellite0: expectedUsage,
            satellite1: expectedUsage,
        }
        usageBySatellite, err = bandwidthdb.SummaryBySatellite(ctx, now.Add(-10*time.Hour), now.Add(10*time.Hour))
        require.NoError(t, err)
        require.Equal(t, expectedUsageBySatellite, usageBySatellite)

        // only range capturing second satellite
        usage, err = bandwidthdb.Summary(ctx, now.Add(time.Hour), now.Add(10*time.Hour))
        require.NoError(t, err)
        require.Equal(t, expectedUsage, usage)

        // only range capturing second satellite
        expectedUsageBySatellite = map[storj.NodeID]*bandwidth.Usage{
            satellite1: expectedUsage,
        }
        usageBySatellite, err = bandwidthdb.SummaryBySatellite(ctx, now.Add(time.Hour), now.Add(10*time.Hour))
        require.NoError(t, err)
        require.Equal(t, expectedUsageBySatellite, usageBySatellite)
    })
}
storagenode/bandwidth/usage.go (new file, 78 lines)
@ -0,0 +1,78 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package bandwidth

import (
    "context"
    "time"

    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/storj"
)

// DB contains information about bandwidth usage.
type DB interface {
    Add(ctx context.Context, satelliteID storj.NodeID, action pb.PieceAction, amount int64, created time.Time) error
    Summary(ctx context.Context, from, to time.Time) (*Usage, error)
    SummaryBySatellite(ctx context.Context, from, to time.Time) (map[storj.NodeID]*Usage, error)
}

// Usage contains bandwidth usage information grouped by piece action
type Usage struct {
    Invalid int64
    Unknown int64

    Put       int64
    Get       int64
    GetAudit  int64
    GetRepair int64
    PutRepair int64
    Delete    int64
}

// Include adds the specified action to the appropriate field.
func (usage *Usage) Include(action pb.PieceAction, amount int64) {
    switch action {
    case pb.PieceAction_INVALID:
        usage.Invalid += amount
    case pb.PieceAction_PUT:
        usage.Put += amount
    case pb.PieceAction_GET:
        usage.Get += amount
    case pb.PieceAction_GET_AUDIT:
        usage.GetAudit += amount
    case pb.PieceAction_GET_REPAIR:
        usage.GetRepair += amount
    case pb.PieceAction_PUT_REPAIR:
        usage.PutRepair += amount
    case pb.PieceAction_DELETE:
        usage.Delete += amount
    default:
        usage.Unknown += amount
    }
}
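To make the accounting concrete, here is a small sketch of how a handful of transfers accumulate into one Usage (inside this package; the fmt import is assumed):

    var usage Usage
    usage.Include(pb.PieceAction_PUT, 1024)      // an upload
    usage.Include(pb.PieceAction_GET, 2048)      // a download
    usage.Include(pb.PieceAction_GET_AUDIT, 256) // an audit read

    fmt.Println(usage.Put, usage.Get, usage.GetAudit) // 1024 2048 256
    fmt.Println(usage.Total())                        // 3328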
// Add adds another usage to this one.
func (usage *Usage) Add(b *Usage) {
    usage.Invalid += b.Invalid
    usage.Unknown += b.Unknown
    usage.Put += b.Put
    usage.Get += b.Get
    usage.GetAudit += b.GetAudit
    usage.GetRepair += b.GetRepair
    usage.PutRepair += b.PutRepair
    usage.Delete += b.Delete
}

// Total sums all types of bandwidth usage
func (usage *Usage) Total() int64 {
    return usage.Invalid +
        usage.Unknown +
        usage.Put +
        usage.Get +
        usage.GetAudit +
        usage.GetRepair +
        usage.PutRepair +
        usage.Delete
}
storagenode/collector/service.go (new file, 33 lines)
@ -0,0 +1,33 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package collector

import (
    "time"

    "go.uber.org/zap"

    "storj.io/storj/storagenode/pieces"
)

// Config defines parameters for the storage node Collector.
type Config struct {
    Interval time.Duration
}

// Service implements collecting expired pieces on the storage node.
type Service struct {
    log        *zap.Logger
    pieces     *pieces.Store
    pieceinfos pieces.DB
}

// NewService creates a new collector service.
func NewService(log *zap.Logger, pieces *pieces.Store, pieceinfos pieces.DB) *Service {
    return &Service{
        log:        log,
        pieces:     pieces,
        pieceinfos: pieceinfos,
    }
}
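The commit only introduces the collector's state and constructor; there is no Run loop here yet. Purely as a sketch of where this is headed, assuming a GetExpired query on pieces.DB, a Delete helper on pieces.Store, and an interval argument (none of which exist in this commit), such a loop might look like:

    // Hypothetical collection loop; GetExpired and Delete are assumptions,
    // not part of this commit.
    func (service *Service) Run(ctx context.Context, interval time.Duration) error {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            expired, err := service.pieceinfos.GetExpired(ctx, time.Now())
            if err != nil {
                service.log.Error("unable to query expired pieces", zap.Error(err))
            }
            for _, piece := range expired {
                if err := service.pieces.Delete(ctx, piece.SatelliteID, piece.PieceID); err != nil {
                    service.log.Error("unable to delete expired piece", zap.Error(err))
                }
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-ticker.C:
            }
        }
    }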
storagenode/inspector/inspector.go (new file, 146 lines)
@ -0,0 +1,146 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package inspector

import (
    "context"
    "strings"
    "time"

    "github.com/golang/protobuf/ptypes"
    "github.com/zeebo/errs"
    "go.uber.org/zap"
    monkit "gopkg.in/spacemonkeygo/monkit.v2"

    "storj.io/storj/pkg/kademlia"
    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/piecestore/psserver"
    "storj.io/storj/pkg/piecestore/psserver/psdb"
    "storj.io/storj/pkg/storj"
    "storj.io/storj/storagenode/bandwidth"
    "storj.io/storj/storagenode/pieces"
)

var (
    mon = monkit.Package()

    // Error is the default error class for piecestore inspector errors
    Error = errs.Class("piecestore inspector")
)

// Endpoint implements the storage node inspector
type Endpoint struct {
    log       *zap.Logger
    pieceInfo pieces.DB
    kademlia  *kademlia.Kademlia
    usageDB   bandwidth.DB
    psdbDB    *psdb.DB // TODO remove after complete migration

    startTime time.Time
    config    psserver.Config
}

// NewEndpoint creates a piecestore inspector instance
func NewEndpoint(log *zap.Logger, pieceInfo pieces.DB, kademlia *kademlia.Kademlia, usageDB bandwidth.DB, psdbDB *psdb.DB, config psserver.Config) *Endpoint {
    return &Endpoint{
        log:       log,
        pieceInfo: pieceInfo,
        kademlia:  kademlia,
        usageDB:   usageDB,
        psdbDB:    psdbDB,
        config:    config,
        startTime: time.Now(),
    }
}

func (inspector *Endpoint) retrieveStats(ctx context.Context) (*pb.StatSummaryResponse, error) {
    totalUsedSpace, err := inspector.pieceInfo.SpaceUsed(ctx)
    if err != nil {
        return nil, err
    }
    usage, err := inspector.usageDB.Summary(ctx, getBeginningOfMonth(), time.Now())
    if err != nil {
        return nil, err
    }
    totalUsedBandwidth := int64(0)
    oldUsage, err := inspector.psdbDB.SumTTLSizes()
    if err != nil {
        inspector.log.Warn("unable to calculate old bandwidth usage")
    } else {
        totalUsedBandwidth = oldUsage
    }

    totalUsedBandwidth += usage.Total()

    return &pb.StatSummaryResponse{
        UsedSpace:          totalUsedSpace,
        AvailableSpace:     inspector.config.AllocatedDiskSpace.Int64() - totalUsedSpace,
        UsedBandwidth:      totalUsedBandwidth,
        AvailableBandwidth: inspector.config.AllocatedBandwidth.Int64() - totalUsedBandwidth,
    }, nil
}

// Stats returns current statistics about the storage node
func (inspector *Endpoint) Stats(ctx context.Context, in *pb.StatsRequest) (out *pb.StatSummaryResponse, err error) {
    defer mon.Task()(&ctx)(&err)

    inspector.log.Debug("Getting Stats...")

    statsSummary, err := inspector.retrieveStats(ctx)
    if err != nil {
        return nil, err
    }

    inspector.log.Info("Successfully retrieved Stats...")

    return statsSummary, nil
}

func (inspector *Endpoint) getDashboardData(ctx context.Context) (*pb.DashboardResponse, error) {
    statsSummary, err := inspector.retrieveStats(ctx)
    if err != nil {
        return &pb.DashboardResponse{}, Error.Wrap(err)
    }

    // TODO: querying all nodes is slow, find a more performant way to do this.
    nodes, err := inspector.kademlia.FindNear(ctx, storj.NodeID{}, 10000000)
    if err != nil {
        return &pb.DashboardResponse{}, Error.Wrap(err)
    }

    bootstrapNodes := inspector.kademlia.GetBootstrapNodes()
    bsNodes := make([]string, len(bootstrapNodes))
    for i, node := range bootstrapNodes {
        bsNodes[i] = node.Address.Address
    }

    return &pb.DashboardResponse{
        NodeId:           inspector.kademlia.Local().Id,
        NodeConnections:  int64(len(nodes)),
        BootstrapAddress: strings.Join(bsNodes, ", "),
        InternalAddress:  "",
        ExternalAddress:  inspector.kademlia.Local().Address.Address,
        Connection:       true,
        Uptime:           ptypes.DurationProto(time.Since(inspector.startTime)),
        Stats:            statsSummary,
    }, nil
}

// Dashboard returns dashboard information
func (inspector *Endpoint) Dashboard(ctx context.Context, in *pb.DashboardRequest) (out *pb.DashboardResponse, err error) {
    defer mon.Task()(&ctx)(&err)

    data, err := inspector.getDashboardData(ctx)
    if err != nil {
        inspector.log.Warn("unable to get dashboard information")
        return nil, err
    }
    return data, nil
}

func getBeginningOfMonth() time.Time {
    t := time.Now()
    y, m, _ := t.Date()
    return time.Date(y, m, 1, 0, 0, 0, 0, time.Now().Location())
}
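A worked example of the helper: any call during March 2019 returns March 1st at 00:00 local time, so the bandwidth summary always covers the current calendar month. (The same helper is duplicated in the monitor package below; deduplicating it is an obvious follow-up.)

    // if time.Now() is 2019-03-17 14:05:00 local time, then
    // getBeginningOfMonth() == 2019-03-01 00:00:00 local time
    start := getBeginningOfMonth()
    fmt.Println(start.Day(), start.Hour(), start.Minute()) // always: 1 0 0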
storagenode/inspector/inspector_test.go (new file, 109 lines)
@ -0,0 +1,109 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package inspector_test

import (
    "math/rand"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "storj.io/storj/internal/memory"
    "storj.io/storj/internal/testcontext"
    "storj.io/storj/internal/testplanet"
    "storj.io/storj/pkg/pb"
)

func TestInspectorStats(t *testing.T) {
    ctx := testcontext.New(t)
    defer ctx.Cleanup()

    planet, err := testplanet.New(t, 1, 6, 1)
    require.NoError(t, err)
    defer ctx.Check(planet.Shutdown)

    planet.Start(ctx)

    var availableBandwidth int64
    var availableSpace int64
    for _, storageNode := range planet.StorageNodes {
        response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
        require.NoError(t, err)

        assert.Zero(t, response.UsedBandwidth)
        assert.Zero(t, response.UsedSpace)
        assert.True(t, response.AvailableBandwidth > 0)
        assert.True(t, response.AvailableSpace > 0)

        // assume that all storage nodes have the same initial values
        availableBandwidth = response.AvailableBandwidth
        availableSpace = response.AvailableSpace
    }

    expectedData := make([]byte, 100*memory.KiB)
    _, err = rand.Read(expectedData)
    require.NoError(t, err)

    err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test/bucket", "test/path", expectedData)
    require.NoError(t, err)

    for _, storageNode := range planet.StorageNodes {
        response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
        require.NoError(t, err)

        // TODO set more accurate assertions
        if response.UsedSpace > 0 {
            assert.True(t, response.UsedBandwidth > 0)
            assert.Equal(t, availableBandwidth-response.UsedBandwidth, response.AvailableBandwidth)
            assert.Equal(t, availableSpace-response.UsedSpace, response.AvailableSpace)

            assert.Equal(t, response.UsedSpace, response.UsedBandwidth)
        } else {
            assert.Zero(t, response.UsedSpace)
            // TODO track why this is failing
            //assert.Equal(t, availableBandwidth, response.AvailableBandwidth)
            assert.Equal(t, availableSpace, response.AvailableSpace)
        }
    }
}

func TestInspectorDashboard(t *testing.T) {
    ctx := testcontext.New(t)
    defer ctx.Cleanup()

    planet, err := testplanet.New(t, 1, 6, 1)
    require.NoError(t, err)
    defer ctx.Check(planet.Shutdown)

    planet.Start(ctx)

    for _, storageNode := range planet.StorageNodes {
        response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &pb.DashboardRequest{})
        require.NoError(t, err)

        assert.True(t, response.Uptime.Nanos > 0)
        assert.Equal(t, storageNode.ID(), response.NodeId)
        assert.Equal(t, storageNode.Addr(), response.ExternalAddress)
        assert.NotNil(t, response.Stats)
    }

    expectedData := make([]byte, 100*memory.KiB)
    _, err = rand.Read(expectedData)
    require.NoError(t, err)

    err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test/bucket", "test/path", expectedData)
    require.NoError(t, err)

    for _, storageNode := range planet.StorageNodes {
        response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &pb.DashboardRequest{})
        require.NoError(t, err)

        assert.True(t, response.Uptime.Nanos > 0)
        assert.Equal(t, storageNode.ID(), response.NodeId)
        assert.Equal(t, storageNode.Addr(), response.ExternalAddress)
        assert.Equal(t, int64(len(planet.StorageNodes)+len(planet.Satellites)), response.NodeConnections)
        assert.NotNil(t, response.Stats)
    }
}
storagenode/monitor/monitor.go (new file, 165 lines)
@ -0,0 +1,165 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package monitor

import (
    "context"
    "time"

    "github.com/zeebo/errs"
    "go.uber.org/zap"
    monkit "gopkg.in/spacemonkeygo/monkit.v2"

    "storj.io/storj/internal/sync2"
    "storj.io/storj/pkg/kademlia"
    "storj.io/storj/pkg/pb"
    "storj.io/storj/storagenode/bandwidth"
    "storj.io/storj/storagenode/pieces"
)

var (
    mon = monkit.Package()

    // Error is the default error class for piecestore monitor errors
    Error = errs.Class("piecestore monitor")
)

// Config defines parameters for storage node disk and bandwidth usage monitoring.
type Config struct {
    Interval time.Duration `help:"how frequently Kademlia bucket should be refreshed with node stats" default:"1h0m0s"`
}

// Service monitors disk usage and updates the kademlia network as necessary.
type Service struct {
    log                *zap.Logger
    routingTable       *kademlia.RoutingTable
    store              *pieces.Store
    pieceInfo          pieces.DB
    usageDB            bandwidth.DB
    allocatedDiskSpace int64
    allocatedBandwidth int64
    Loop               sync2.Cycle
}

// TODO: should it be responsible for monitoring actual bandwidth as well?

// NewService creates a new storage node monitoring service.
func NewService(log *zap.Logger, routingTable *kademlia.RoutingTable, store *pieces.Store, pieceInfo pieces.DB, usageDB bandwidth.DB, allocatedDiskSpace, allocatedBandwidth int64, interval time.Duration) *Service {
    return &Service{
        log:                log,
        routingTable:       routingTable,
        store:              store,
        pieceInfo:          pieceInfo,
        usageDB:            usageDB,
        allocatedDiskSpace: allocatedDiskSpace,
        allocatedBandwidth: allocatedBandwidth,
        Loop:               *sync2.NewCycle(interval),
    }
}

// Run runs the monitor service
func (service *Service) Run(ctx context.Context) (err error) {
    defer mon.Task()(&ctx)(&err)

    // get the disk space details
    // The returned path ends in a slash only if it represents a root directory, such as "/" on Unix or `C:\` on Windows.
    info, err := service.store.StorageStatus()
    if err != nil {
        return Error.Wrap(err)
    }
    freeDiskSpace := info.DiskFree

    totalUsed, err := service.usedSpace(ctx)
    if err != nil {
        return err
    }

    usedBandwidth, err := service.usedBandwidth(ctx)
    if err != nil {
        return err
    }

    if usedBandwidth > service.allocatedBandwidth {
        service.log.Warn("Exceeded the allowed bandwidth setting")
    } else {
        service.log.Info("Remaining Bandwidth", zap.Int64("bytes", service.allocatedBandwidth-usedBandwidth))
    }

    // check that the hard drive is big enough:
    // first-time setup as a piece node server
    if totalUsed == 0 && freeDiskSpace < service.allocatedDiskSpace {
        service.allocatedDiskSpace = freeDiskSpace
        service.log.Warn("Disk space is less than requested. Allocating space", zap.Int64("bytes", service.allocatedDiskSpace))
    }

    // the piece node server is restarting after already having run, used more
    // than the allocated space, and the user lowered the allocation setting
    // before restarting
    if totalUsed >= service.allocatedDiskSpace {
        service.log.Warn("Used more space than allocated. Allocating space", zap.Int64("bytes", service.allocatedDiskSpace))
    }

    // the available disk space is less than the remaining allocated space,
    // due to a change of setting before restarting
    if freeDiskSpace < service.allocatedDiskSpace-totalUsed {
        service.allocatedDiskSpace = freeDiskSpace
        service.log.Warn("Disk space is less than requested. Allocating space", zap.Int64("bytes", service.allocatedDiskSpace))
    }

    return service.Loop.Run(ctx, func(ctx context.Context) error {
        err := service.updateNodeInformation(ctx)
        if err != nil {
            service.log.Error("error during updating node information: ", zap.Error(err))
        }
        return err
    })
}
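The three allocation adjustments above are easiest to see with numbers. A hypothetical distillation of Run's rules into one function, plus a worked case (this helper does not exist in the commit; it is only an illustration):

    // effectiveAllocation mirrors Run's adjustments: the node never promises
    // more space than the disk can actually provide.
    func effectiveAllocation(allocated, freeDisk, totalUsed int64) int64 {
        // first-time setup: the disk is smaller than the requested allocation
        if totalUsed == 0 && freeDisk < allocated {
            return freeDisk
        }
        // after a restart, the remaining allowance no longer fits on disk
        if freeDisk < allocated-totalUsed {
            return freeDisk
        }
        return allocated
    }

    // Example: a fresh node asks for 1 TB but only 500 GB is free, so the
    // allocation is capped at 500 GB:
    //   effectiveAllocation(1*memory.TB.Int64(), 500*memory.GB.Int64(), 0)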
func (service *Service) updateNodeInformation(ctx context.Context) error {
    usedSpace, err := service.usedSpace(ctx)
    if err != nil {
        return Error.Wrap(err)
    }

    usedBandwidth, err := service.usedBandwidth(ctx)
    if err != nil {
        return Error.Wrap(err)
    }

    self := service.routingTable.Local()

    self.Restrictions = &pb.NodeRestrictions{
        FreeBandwidth: service.allocatedBandwidth - usedBandwidth,
        FreeDisk:      service.allocatedDiskSpace - usedSpace,
    }

    // Update the routing table with latest restrictions
    if err := service.routingTable.UpdateSelf(&self); err != nil {
        return Error.Wrap(err)
    }

    return nil
}

func (service *Service) usedSpace(ctx context.Context) (int64, error) {
    usedSpace, err := service.pieceInfo.SpaceUsed(ctx)
    if err != nil {
        return 0, err
    }
    return usedSpace, nil
}

func (service *Service) usedBandwidth(ctx context.Context) (int64, error) {
    usage, err := service.usageDB.Summary(ctx, getBeginningOfMonth(), time.Now())
    if err != nil {
        return 0, err
    }
    return usage.Total(), nil
}

func getBeginningOfMonth() time.Time {
    t := time.Now()
    y, m, _ := t.Date()
    return time.Date(y, m, 1, 0, 0, 0, 0, time.Now().Location())
}
storagenode/monitor/monitor_test.go (new file, 65 lines)
@ -0,0 +1,65 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package monitor_test

import (
    "crypto/rand"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "storj.io/storj/internal/memory"
    "storj.io/storj/internal/testcontext"
    "storj.io/storj/internal/testplanet"
    "storj.io/storj/pkg/pb"
)

func TestMonitor(t *testing.T) {
    ctx := testcontext.New(t)
    defer ctx.Cleanup()

    planet, err := testplanet.New(t, 1, 6, 1)
    require.NoError(t, err)
    defer ctx.Check(planet.Shutdown)

    planet.Start(ctx)

    var freeBandwidth int64
    var freeSpace int64
    for _, storageNode := range planet.StorageNodes {
        storageNode.Storage2.Monitor.Loop.Pause()

        info, err := storageNode.Kademlia.Service.FetchInfo(ctx, storageNode.Local())
        require.NoError(t, err)

        // assume that all storage nodes have the same initial values
        freeBandwidth = info.Capacity.FreeBandwidth
        freeSpace = info.Capacity.FreeDisk
    }

    expectedData := make([]byte, 100*memory.KiB)
    _, err = rand.Read(expectedData)
    require.NoError(t, err)

    err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test/bucket", "test/path", expectedData)
    require.NoError(t, err)

    nodeAssertions := 0
    for _, storageNode := range planet.StorageNodes {
        storageNode.Storage2.Monitor.Loop.TriggerWait()

        info, err := storageNode.Kademlia.Service.FetchInfo(ctx, storageNode.Local())
        require.NoError(t, err)

        stats, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
        require.NoError(t, err)
        if stats.UsedSpace > 0 {
            assert.Equal(t, freeSpace-stats.UsedSpace, info.Capacity.FreeDisk)
            assert.Equal(t, freeBandwidth-stats.UsedBandwidth, info.Capacity.FreeBandwidth)
            nodeAssertions++
        }
    }
    assert.NotZero(t, nodeAssertions, "no storage nodes were verified")
}
storagenode/orders/db_test.go (new file, 87 lines)
@ -0,0 +1,87 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
    "crypto/rand"
    "testing"

    "github.com/golang/protobuf/ptypes"
    "github.com/google/go-cmp/cmp"
    "github.com/stretchr/testify/require"

    "storj.io/storj/internal/testcontext"
    "storj.io/storj/internal/testplanet"
    "storj.io/storj/pkg/auth/signing"
    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/storj"
    "storj.io/storj/storagenode"
    "storj.io/storj/storagenode/orders"
    "storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)

func TestOrders(t *testing.T) {
    storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
        ctx := testcontext.New(t)
        defer ctx.Cleanup()

        ordersdb := db.Orders()

        storagenode := testplanet.MustPregeneratedSignedIdentity(0)

        satellite0 := testplanet.MustPregeneratedSignedIdentity(1)

        uplink := testplanet.MustPregeneratedSignedIdentity(3)
        piece := storj.NewPieceID()

        serialNumber := newRandomSerial()

        // basic test
        _, err := ordersdb.ListUnsent(ctx, 100)
        require.NoError(t, err)

        now := ptypes.TimestampNow()

        limit, err := signing.SignOrderLimit(signing.SignerFromFullIdentity(satellite0), &pb.OrderLimit2{
            SerialNumber:    serialNumber,
            SatelliteId:     satellite0.ID,
            UplinkId:        uplink.ID,
            StorageNodeId:   storagenode.ID,
            PieceId:         piece,
            Limit:           100,
            Action:          pb.PieceAction_GET,
            PieceExpiration: now,
            OrderExpiration: now,
        })
        require.NoError(t, err)

        order, err := signing.SignOrder(signing.SignerFromFullIdentity(uplink), &pb.Order2{
            SerialNumber: serialNumber,
            Amount:       50,
        })
        require.NoError(t, err)

        info := &orders.Info{Limit: limit, Order: order, Uplink: uplink.PeerIdentity()}

        // basic add
        err = ordersdb.Enqueue(ctx, info)
        require.NoError(t, err)

        // duplicate add
        err = ordersdb.Enqueue(ctx, info)
        require.Error(t, err, "duplicate add")

        unsent, err := ordersdb.ListUnsent(ctx, 100)
        require.NoError(t, err)

        require.Empty(t, cmp.Diff([]*orders.Info{info}, unsent, cmp.Comparer(pb.Equal)))
    })
}

// TODO: move somewhere better
func newRandomSerial() []byte {
    var serial [16]byte
    _, _ = rand.Read(serial[:])
    return serial[:]
}
storagenode/orders/sender.go (new file, 57 lines)
@ -0,0 +1,57 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders

import (
    "context"
    "time"

    "go.uber.org/zap"

    "storj.io/storj/pkg/identity"
    "storj.io/storj/pkg/kademlia"
    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/transport"
)

// Info contains full information about an order.
type Info struct {
    Limit  *pb.OrderLimit2
    Order  *pb.Order2
    Uplink *identity.PeerIdentity
}

// DB implements storing orders for sending to the satellite.
type DB interface {
    // Enqueue inserts an order into the list of orders needing to be sent to the satellite.
    Enqueue(ctx context.Context, info *Info) error
    // ListUnsent returns orders that haven't been sent yet.
    ListUnsent(ctx context.Context, limit int) ([]*Info, error)
}

// SenderConfig defines configuration for sending orders.
type SenderConfig struct {
    Interval time.Duration
}

// Sender periodically sends unsent orders to the satellite.
type Sender struct {
    log    *zap.Logger
    config SenderConfig

    client   transport.Client
    kademlia *kademlia.Kademlia
    orders   DB
}

// NewSender creates an order sender.
func NewSender(log *zap.Logger, client transport.Client, kademlia *kademlia.Kademlia, orders DB, config SenderConfig) *Sender {
    return &Sender{
        log:      log,
        config:   config,
        client:   client,
        kademlia: kademlia,
        orders:   orders,
    }
}
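The file stops at the constructor; the loop that actually settles orders is not part of this commit. As a loudly hypothetical outline of the intended shape, grouping unsent orders by the satellite recorded in each order limit (the settlement RPC does not exist yet, so the sketch only logs; context and storj imports are assumed):

    // Hypothetical outline, not part of this commit.
    func (sender *Sender) Run(ctx context.Context) error {
        ticker := time.NewTicker(sender.config.Interval)
        defer ticker.Stop()
        for {
            unsent, err := sender.orders.ListUnsent(ctx, 100)
            if err != nil {
                sender.log.Error("listing unsent orders failed", zap.Error(err))
            }

            // group by satellite so each batch can go to a single endpoint
            bySatellite := map[storj.NodeID][]*Info{}
            for _, info := range unsent {
                id := info.Limit.SatelliteId
                bySatellite[id] = append(bySatellite[id], info)
            }
            for id, batch := range bySatellite {
                // a real implementation would dial the satellite via
                // sender.client and submit the batch here
                sender.log.Info("would settle orders", zap.Stringer("satellite", id), zap.Int("count", len(batch)))
            }

            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-ticker.C:
            }
        }
    }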
@ -11,6 +11,7 @@ import (
    "golang.org/x/sync/errgroup"
    "google.golang.org/grpc"

    "storj.io/storj/pkg/auth/signing"
    "storj.io/storj/pkg/identity"
    "storj.io/storj/pkg/kademlia"
    "storj.io/storj/pkg/pb"
@ -22,6 +23,13 @@ import (
    "storj.io/storj/pkg/storj"
    "storj.io/storj/pkg/transport"
    "storj.io/storj/storage"
    "storj.io/storj/storagenode/bandwidth"
    "storj.io/storj/storagenode/inspector"
    "storj.io/storj/storagenode/monitor"
    "storj.io/storj/storagenode/orders"
    "storj.io/storj/storagenode/pieces"
    "storj.io/storj/storagenode/piecestore"
    "storj.io/storj/storagenode/trust"
)

// DB is the master database for Storage Node
@ -32,6 +40,14 @@ type DB interface {
    Close() error

    Storage() psserver.Storage
    Pieces() storage.Blobs

    Orders() orders.DB
    PieceInfo() pieces.DB
    CertDB() trust.CertDB
    Bandwidth() bandwidth.DB
    UsedSerials() piecestore.UsedSerials

    // TODO: use better interfaces
    PSDB() *psdb.DB
    RoutingTable() (kdb, ndb storage.KeyValueStore)
@ -44,6 +60,8 @@ type Config struct {
    Server   server.Config
    Kademlia kademlia.Config
    Storage  psserver.Config

    Storage2 piecestore.Config
}

// Verify verifies whether configuration is consistent and acceptable.
@ -72,15 +90,20 @@ type Peer struct {
    }

    Storage struct {
-       Endpoint  *psserver.Server // TODO: separate into endpoint and service
-       Monitor   *psserver.Monitor
-       Collector *psserver.Collector
-       Inspector *psserver.Inspector
+       Endpoint *psserver.Server // TODO: separate into endpoint and service
    }

    Agreements struct {
        Sender *agreementsender.AgreementSender
    }

    Storage2 struct {
        Trust     *trust.Pool
        Store     *pieces.Store
        Endpoint  *piecestore.Endpoint
        Inspector *inspector.Endpoint
        Monitor   *monitor.Service
    }
}

// New creates a new Storage Node.
@ -158,13 +181,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config) (*P
            return nil, errs.Combine(err, peer.Close())
        }
        pb.RegisterPieceStoreRoutesServer(peer.Server.GRPC(), peer.Storage.Endpoint)
-
-       peer.Storage.Inspector = psserver.NewInspector(peer.Storage.Endpoint)
-       pb.RegisterPieceStoreInspectorServer(peer.Server.PrivateGRPC(), peer.Storage.Inspector)
-
-       // TODO: organize better
-       peer.Storage.Monitor = psserver.NewMonitor(peer.Log.Named("piecestore:monitor"), config.KBucketRefreshInterval, peer.Kademlia.RoutingTable, peer.Storage.Endpoint)
-       peer.Storage.Collector = psserver.NewCollector(peer.Log.Named("piecestore:collector"), peer.DB.PSDB(), peer.DB.Storage(), config.CollectorInterval)
    }

    { // agreements
@ -176,6 +192,54 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config Config) (*P
        )
    }

    { // setup storage 2
        trustAllSatellites := !config.Storage.SatelliteIDRestriction
        peer.Storage2.Trust, err = trust.NewPool(peer.Kademlia.Service, trustAllSatellites, config.Storage.WhitelistedSatelliteIDs)
        if err != nil {
            return nil, errs.Combine(err, peer.Close())
        }

        peer.Storage2.Store = pieces.NewStore(peer.Log.Named("pieces"), peer.DB.Pieces())

        peer.Storage2.Endpoint, err = piecestore.NewEndpoint(
            peer.Log.Named("piecestore"),
            signing.SignerFromFullIdentity(peer.Identity),
            peer.Storage2.Trust,
            peer.Storage2.Store,
            peer.DB.PieceInfo(),
            peer.DB.Orders(),
            peer.DB.Bandwidth(),
            peer.DB.UsedSerials(),
            config.Storage2,
        )
        if err != nil {
            return nil, errs.Combine(err, peer.Close())
        }
        pb.RegisterPiecestoreServer(peer.Server.GRPC(), peer.Storage2.Endpoint)

        peer.Storage2.Inspector = inspector.NewEndpoint(
            peer.Log.Named("pieces:inspector"),
            peer.DB.PieceInfo(),
            peer.Kademlia.Service,
            peer.DB.Bandwidth(),
            peer.DB.PSDB(),
            config.Storage,
        )
        pb.RegisterPieceStoreInspectorServer(peer.Server.PrivateGRPC(), peer.Storage2.Inspector)

        peer.Storage2.Monitor = monitor.NewService(
            log.Named("piecestore:monitor"),
            peer.Kademlia.RoutingTable,
            peer.Storage2.Store,
            peer.DB.PieceInfo(),
            peer.DB.Bandwidth(),
            config.Storage.AllocatedDiskSpace.Int64(),
            config.Storage.AllocatedBandwidth.Int64(),
            // TODO: use config.Storage.Monitor.Interval, but for some reason it is not set
            config.Storage.KBucketRefreshInterval,
        )
    }

    return peer, nil
}

@ -193,10 +257,7 @@ func (peer *Peer) Run(ctx context.Context) error {
        return ignoreCancel(peer.Agreements.Sender.Run(ctx))
    })
    group.Go(func() error {
-       return ignoreCancel(peer.Storage.Monitor.Run(ctx))
-   })
-   group.Go(func() error {
-       return ignoreCancel(peer.Storage.Collector.Run(ctx))
+       return ignoreCancel(peer.Storage2.Monitor.Run(ctx))
    })
    group.Go(func() error {
        // TODO: move the message into Server instead
112
storagenode/pieces/db_test.go
Normal file
112
storagenode/pieces/db_test.go
Normal file
@ -0,0 +1,112 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package pieces_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testplanet"
|
||||
"storj.io/storj/pkg/auth/signing"
|
||||
"storj.io/storj/pkg/pb"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/storagenode"
|
||||
"storj.io/storj/storagenode/pieces"
|
||||
"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
|
||||
)
|
||||
|
||||
func TestPieceInfo(t *testing.T) {
|
||||
storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
pieceinfos := db.PieceInfo()
|
||||
|
||||
satellite0 := testplanet.MustPregeneratedSignedIdentity(0)
|
||||
satellite1 := testplanet.MustPregeneratedSignedIdentity(1)
|
||||
|
||||
uplink0 := testplanet.MustPregeneratedSignedIdentity(2)
|
||||
uplink1 := testplanet.MustPregeneratedSignedIdentity(3)
|
||||
|
||||
pieceid0 := storj.NewPieceID()
|
||||
|
||||
now := time.Now()
|
||||
|
||||
piecehash0, err := signing.SignPieceHash(
|
||||
signing.SignerFromFullIdentity(uplink0),
|
||||
&pb.PieceHash{
|
||||
PieceId: pieceid0,
|
||||
Hash: []byte{1, 2, 3, 4, 5},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
info0 := &pieces.Info{
|
||||
SatelliteID: satellite0.ID,
|
||||
|
||||
PieceID: pieceid0,
|
||||
PieceSize: 123,
|
||||
PieceExpiration: now,
|
||||
|
||||
UplinkPieceHash: piecehash0,
|
||||
Uplink: uplink0.PeerIdentity(),
|
||||
}
|
||||
|
||||
piecehash1, err := signing.SignPieceHash(
|
||||
signing.SignerFromFullIdentity(uplink1),
|
||||
&pb.PieceHash{
|
||||
PieceId: pieceid0,
|
||||
Hash: []byte{1, 2, 3, 4, 5},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
info1 := &pieces.Info{
|
||||
SatelliteID: satellite1.ID,
|
||||
|
||||
PieceID: pieceid0,
|
||||
PieceSize: 123,
|
||||
PieceExpiration: now,
|
||||
|
||||
UplinkPieceHash: piecehash1,
|
||||
Uplink: uplink1.PeerIdentity(),
|
||||
}
|
||||
|
||||
_, err = pieceinfos.Get(ctx, info0.SatelliteID, info0.PieceID)
|
||||
require.Error(t, err, "getting element that doesn't exist")
|
||||
|
||||
// adding stuff
|
||||
err = pieceinfos.Add(ctx, info0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = pieceinfos.Add(ctx, info1)
|
||||
require.NoError(t, err, "adding different satellite, but same pieceid")
|
||||
|
||||
err = pieceinfos.Add(ctx, info0)
|
||||
require.Error(t, err, "adding duplicate")
|
||||
|
||||
// getting the added information
|
||||
info0loaded, err := pieceinfos.Get(ctx, info0.SatelliteID, info0.PieceID)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, cmp.Diff(info0, info0loaded, cmp.Comparer(pb.Equal)))
|
||||
|
||||
info1loaded, err := pieceinfos.Get(ctx, info1.SatelliteID, info1.PieceID)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, cmp.Diff(info1, info1loaded, cmp.Comparer(pb.Equal)))
|
||||
|
||||
// deleting
|
||||
err = pieceinfos.Delete(ctx, info0.SatelliteID, info0.PieceID)
|
||||
require.NoError(t, err)
|
||||
err = pieceinfos.Delete(ctx, info1.SatelliteID, info1.PieceID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// getting after delete
|
||||
_, err = pieceinfos.Get(ctx, info0.SatelliteID, info0.PieceID)
|
||||
require.Error(t, err)
|
||||
_, err = pieceinfos.Get(ctx, info1.SatelliteID, info1.PieceID)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
storagenode/pieces/readwrite.go (new file, 117 lines)
@ -0,0 +1,117 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package pieces

import (
    "bufio"
    "hash"
    "io"

    "github.com/zeebo/errs"

    "storj.io/storj/pkg/pkcrypto"
    "storj.io/storj/storage"
)

// Writer implements a piece writer that writes content to the blob store and calculates a hash.
type Writer struct {
    buf  bufio.Writer
    hash hash.Hash
    blob storage.BlobWriter
    size int64
}

// NewWriter creates a new writer for storage.BlobWriter.
func NewWriter(blob storage.BlobWriter, bufferSize int) (*Writer, error) {
    w := &Writer{}
    w.buf = *bufio.NewWriterSize(blob, bufferSize)
    w.blob = blob
    w.hash = pkcrypto.NewHash()
    return w, nil
}

// Write writes data to the blob and calculates the hash.
func (w *Writer) Write(data []byte) (int, error) {
    n, err := w.buf.Write(data)
    w.size += int64(n)
    _, _ = w.hash.Write(data[:n]) // guaranteed not to return an error
    return n, Error.Wrap(err)
}

// Size returns the amount of data written so far.
func (w *Writer) Size() int64 { return w.size }

// Hash returns the hash of data written so far.
func (w *Writer) Hash() []byte { return w.hash.Sum(nil) }

// Commit commits the piece to permanent storage.
func (w *Writer) Commit() error {
    if err := w.buf.Flush(); err != nil {
        return Error.Wrap(errs.Combine(err, w.Cancel()))
    }
    return Error.Wrap(w.blob.Commit())
}

// Cancel deletes any temporarily written data.
func (w *Writer) Cancel() error {
    w.buf.Reset(nil)
    return Error.Wrap(w.blob.Cancel())
}
// Reader implements a piece writer that writes content to blob store and calculates a hash.
|
||||
type Reader struct {
|
||||
buf bufio.Reader
|
||||
blob storage.BlobReader
|
||||
pos int64
|
||||
size int64
|
||||
}
|
||||
|
||||
// NewReader creates a new reader for storage.BlobReader.
|
||||
func NewReader(blob storage.BlobReader, bufferSize int) (*Reader, error) {
|
||||
size, err := blob.Size()
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
|
||||
reader := &Reader{}
|
||||
reader.buf = *bufio.NewReaderSize(blob, bufferSize)
|
||||
reader.blob = blob
|
||||
reader.size = size
|
||||
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// Read reads data from the underlying blob, buffering as necessary.
|
||||
func (r *Reader) Read(data []byte) (int, error) {
|
||||
n, err := r.blob.Read(data)
|
||||
r.pos += int64(n)
|
||||
return n, Error.Wrap(err)
|
||||
}
|
||||
|
||||
// Seek seeks to the specified location.
|
||||
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
|
||||
if whence == io.SeekStart && r.pos == offset {
|
||||
return r.pos, nil
|
||||
}
|
||||
|
||||
r.buf.Reset(r.blob)
|
||||
pos, err := r.blob.Seek(offset, whence)
|
||||
r.pos = pos
|
||||
return pos, Error.Wrap(err)
|
||||
}
|
||||
|
||||
// ReadAt reads data at the specified offset
|
||||
func (r *Reader) ReadAt(data []byte, offset int64) (int, error) {
|
||||
n, err := r.blob.ReadAt(data, offset)
|
||||
return n, Error.Wrap(err)
|
||||
}
|
||||
|
||||
// Size returns the amount of data written so far.
|
||||
func (r *Reader) Size() int64 { return r.size }
|
||||
|
||||
// Close closes the reader.
|
||||
func (r *Reader) Close() error {
|
||||
r.buf.Reset(nil)
|
||||
return Error.Wrap(r.blob.Close())
|
||||
}
|
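Together, Writer and Reader give the blob store a piece-oriented API. The following is a minimal sketch of the intended call sequence, assuming blobs is a storage.Blobs implementation such as filestore and ref is a storage.BlobRef for the piece; error handling is elided and the buffer sizes mirror the constants defined in store.go below:

// Sketch: writing a piece and reading it back through the blob store.
blob, _ := blobs.Create(ctx, ref, 4*memory.MiB.Int64())
writer, _ := pieces.NewWriter(blob, 256*memory.KiB.Int())
_, _ = writer.Write(data)
digest := writer.Hash() // hash of everything written so far
_ = writer.Commit()     // or writer.Cancel() to discard the temporary data

rblob, _ := blobs.Open(ctx, ref)
reader, _ := pieces.NewReader(rblob, 256*memory.KiB.Int())
buf := make([]byte, reader.Size())
_, _ = io.ReadFull(reader, buf)
_ = reader.Close()

The store_test.go further down exercises this same flow through the higher-level Store type.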
storagenode/pieces/store.go (new file, 120 lines)
@ -0,0 +1,120 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package pieces

import (
	"context"
	"time"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
)

const (
	readBufferSize  = 256 * memory.KiB
	writeBufferSize = 256 * memory.KiB
	preallocSize    = 4 * memory.MiB
)

// Error is the default error class.
var Error = errs.Class("pieces error")

// Info contains all the information we need to know about a Piece to manage it.
type Info struct {
	SatelliteID storj.NodeID

	PieceID         storj.PieceID
	PieceSize       int64
	PieceExpiration time.Time

	UplinkPieceHash *pb.PieceHash
	Uplink          *identity.PeerIdentity
}

// DB stores meta information about a piece; the actual piece is stored in storage.Blobs.
type DB interface {
	// Add inserts Info to the database.
	Add(context.Context, *Info) error
	// Get returns Info about a piece.
	Get(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (*Info, error)
	// Delete deletes Info about a piece.
	Delete(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) error
	// SpaceUsed calculates the disk space used by all pieces.
	SpaceUsed(ctx context.Context) (int64, error)
}

// Store implements storing pieces onto a blob storage implementation.
type Store struct {
	log   *zap.Logger
	blobs storage.Blobs
}

// NewStore creates a new piece store.
func NewStore(log *zap.Logger, blobs storage.Blobs) *Store {
	return &Store{
		log:   log,
		blobs: blobs,
	}
}

// Writer returns a new piece writer.
func (store *Store) Writer(ctx context.Context, satellite storj.NodeID, pieceID storj.PieceID) (*Writer, error) {
	blob, err := store.blobs.Create(ctx, storage.BlobRef{
		Namespace: satellite.Bytes(),
		Key:       pieceID.Bytes(),
	}, preallocSize.Int64())
	if err != nil {
		return nil, Error.Wrap(err)
	}

	writer, err := NewWriter(blob, writeBufferSize.Int())
	return writer, Error.Wrap(err)
}

// Reader returns a new piece reader.
func (store *Store) Reader(ctx context.Context, satellite storj.NodeID, pieceID storj.PieceID) (*Reader, error) {
	blob, err := store.blobs.Open(ctx, storage.BlobRef{
		Namespace: satellite.Bytes(),
		Key:       pieceID.Bytes(),
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	reader, err := NewReader(blob, readBufferSize.Int())
	return reader, Error.Wrap(err)
}

// Delete deletes the specified piece.
func (store *Store) Delete(ctx context.Context, satellite storj.NodeID, pieceID storj.PieceID) error {
	err := store.blobs.Delete(ctx, storage.BlobRef{
		Namespace: satellite.Bytes(),
		Key:       pieceID.Bytes(),
	})
	return Error.Wrap(err)
}

// StorageStatus contains information about the disk the store is using.
type StorageStatus struct {
	DiskUsed int64
	DiskFree int64
}

// StorageStatus returns information about the disk.
func (store *Store) StorageStatus() (StorageStatus, error) {
	diskFree, err := store.blobs.FreeSpace()
	if err != nil {
		return StorageStatus{}, err
	}
	return StorageStatus{
		DiskUsed: -1, // TODO set value
		DiskFree: diskFree,
	}, nil
}
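Note that the BlobRef namespace is the satellite ID, so the same PieceID stored for two satellites maps to two independent blobs, which is the same property the pieceinfo test above relies on at the database level. A small illustration, assuming store is a *pieces.Store and sat0, sat1 are distinct storj.NodeIDs; error handling is elided:

// Illustration: the same piece ID under two satellites is two distinct blobs.
pieceID := storj.NewPieceID()

w0, _ := store.Writer(ctx, sat0, pieceID)
_, _ = w0.Write([]byte("for satellite 0"))
_ = w0.Commit()

w1, _ := store.Writer(ctx, sat1, pieceID)
_, _ = w1.Write([]byte("for satellite 1"))
_ = w1.Commit()

// Deleting under sat0 leaves sat1's copy untouched.
_ = store.Delete(ctx, sat0, pieceID)
r, _ := store.Reader(ctx, sat1, pieceID) // still readable
_ = r.Close()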
storagenode/pieces/store_test.go (new file, 108 lines)
@ -0,0 +1,108 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package pieces_test

import (
	"bytes"
	"io"
	"math/rand"
	"testing"

	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/pkcrypto"
	"storj.io/storj/pkg/storj"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/storage/filestore"
	"storj.io/storj/storagenode/pieces"
)

func TestPieces(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	dir, err := filestore.NewDir(ctx.Dir("pieces"))
	require.NoError(t, err)

	blobs := filestore.New(dir)
	defer ctx.Check(blobs.Close)

	store := pieces.NewStore(zaptest.NewLogger(t), blobs)

	satelliteID := testplanet.MustPregeneratedSignedIdentity(0).ID
	pieceID := storj.NewPieceID()

	source := make([]byte, 8000)
	_, _ = rand.Read(source[:])

	{ // write data
		writer, err := store.Writer(ctx, satelliteID, pieceID)
		require.NoError(t, err)

		n, err := io.Copy(writer, bytes.NewReader(source))
		require.NoError(t, err)
		assert.Equal(t, len(source), int(n))
		assert.Equal(t, len(source), int(writer.Size()))

		// verify hash
		hash := pkcrypto.NewHash()
		_, _ = hash.Write(source)
		assert.Equal(t, hash.Sum(nil), writer.Hash())

		// commit
		require.NoError(t, writer.Commit())
	}

	{ // valid reads
		read := func(offset, length int64) []byte {
			reader, err := store.Reader(ctx, satelliteID, pieceID)
			require.NoError(t, err)

			pos, err := reader.Seek(offset, io.SeekStart)
			require.NoError(t, err)
			require.Equal(t, offset, pos)

			data := make([]byte, length)
			n, err := io.ReadFull(reader, data)
			require.NoError(t, err)
			require.Equal(t, int(length), n)

			return data
		}

		require.Equal(t, source[10:11], read(10, 1))
		require.Equal(t, source[10:1010], read(10, 1000))
		require.Equal(t, source, read(0, int64(len(source))))
	}

	{ // test delete
		assert.NoError(t, store.Delete(ctx, satelliteID, pieceID))
		// read should now fail
		_, err := store.Reader(ctx, satelliteID, pieceID)
		assert.Error(t, err)
	}

	{ // write cancel
		cancelledPieceID := storj.NewPieceID()
		writer, err := store.Writer(ctx, satelliteID, cancelledPieceID)
		require.NoError(t, err)

		n, err := io.Copy(writer, bytes.NewReader(source))
		require.NoError(t, err)
		assert.Equal(t, len(source), int(n))
		assert.Equal(t, len(source), int(writer.Size()))

		// cancel writing
		require.NoError(t, writer.Cancel())

		// read should fail
		_, err = store.Reader(ctx, satelliteID, cancelledPieceID)
		assert.Error(t, err)
	}
}
storagenode/piecestore/endpoint.go (new file, 435 lines)
@ -0,0 +1,435 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package piecestore

import (
	"context"
	"io"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/sync2"
	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/storagenode/bandwidth"
	"storj.io/storj/storagenode/monitor"
	"storj.io/storj/storagenode/orders"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/trust"
)

var (
	mon = monkit.Package()

	// Error is the default error class for piecestore errors.
	Error = errs.Class("piecestore")
	// ErrProtocol is the default error class for protocol errors.
	ErrProtocol = errs.Class("piecestore protocol")
	// ErrInternal is the default error class for internal piecestore errors.
	ErrInternal = errs.Class("piecestore internal")
)

var _ pb.PiecestoreServer = (*Endpoint)(nil)

// Config defines parameters for the piecestore endpoint.
type Config struct {
	ExpirationGracePeriod time.Duration `help:"how soon before expiration date should things be considered expired" default:"48h0m0s"`

	Monitor monitor.Config
}

// Endpoint implements uploading, downloading and deleting for a storage node.
type Endpoint struct {
	log    *zap.Logger
	config Config

	signer signing.Signer
	trust  *trust.Pool

	store       *pieces.Store
	pieceinfo   pieces.DB
	orders      orders.DB
	usage       bandwidth.DB
	usedSerials UsedSerials
}

// NewEndpoint creates a new piecestore endpoint.
func NewEndpoint(log *zap.Logger, signer signing.Signer, trust *trust.Pool, store *pieces.Store, pieceinfo pieces.DB, orders orders.DB, usage bandwidth.DB, usedSerials UsedSerials, config Config) (*Endpoint, error) {
	return &Endpoint{
		log:    log,
		config: config,

		signer: signer,
		trust:  trust,

		store:       store,
		pieceinfo:   pieceinfo,
		orders:      orders,
		usage:       usage,
		usedSerials: usedSerials,
	}, nil
}

// Delete handles deleting a piece on piece store.
func (endpoint *Endpoint) Delete(ctx context.Context, delete *pb.PieceDeleteRequest) (_ *pb.PieceDeleteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if delete.Limit.Action != pb.PieceAction_DELETE {
		return nil, Error.New("expected delete action got %v", delete.Limit.Action) // TODO: report grpc status unauthorized or bad request
	}

	if err := endpoint.VerifyOrderLimit(ctx, delete.Limit); err != nil {
		// TODO: report grpc status unauthorized or bad request
		return nil, Error.Wrap(err)
	}

	// TODO: parallelize this and maybe return early
	pieceInfoErr := endpoint.pieceinfo.Delete(ctx, delete.Limit.SatelliteId, delete.Limit.PieceId)
	pieceErr := endpoint.store.Delete(ctx, delete.Limit.SatelliteId, delete.Limit.PieceId)

	if err := errs.Combine(pieceInfoErr, pieceErr); err != nil {
		// explicitly ignoring the error: it is logged below and not returned to the uplink
		// TODO: add more debug info
		endpoint.log.Error("delete failed", zap.Stringer("Piece ID", delete.Limit.PieceId), zap.Error(err))
		// TODO: report internal server internal or missing error using grpc status,
		// e.g. missing might happen when we get a deletion request after garbage collection has deleted it
	} else {
		endpoint.log.Debug("deleted", zap.Stringer("Piece ID", delete.Limit.PieceId))
	}

	return &pb.PieceDeleteResponse{}, nil
}

// Upload handles uploading a piece on piece store.
func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error) {
	ctx := stream.Context()
	defer mon.Task()(&ctx)(&err)

	// TODO: set connection timeouts
	// TODO: set maximum message size

	var message *pb.PieceUploadRequest

	message, err = stream.Recv()
	switch {
	case err != nil:
		return ErrProtocol.Wrap(err)
	case message == nil:
		return ErrProtocol.New("expected a message")
	case message.Limit == nil:
		return ErrProtocol.New("expected order limit as the first message")
	}
	limit := message.Limit

	// TODO: verify that we have the expected amount of storage before continuing

	if limit.Action != pb.PieceAction_PUT && limit.Action != pb.PieceAction_PUT_REPAIR {
		return ErrProtocol.New("expected put or put repair action got %v", limit.Action) // TODO: report grpc status unauthorized or bad request
	}

	if err := endpoint.VerifyOrderLimit(ctx, limit); err != nil {
		return err // TODO: report grpc status unauthorized or bad request
	}

	defer func() {
		if err != nil {
			endpoint.log.Debug("upload failed", zap.Stringer("Piece ID", limit.PieceId), zap.Error(err))
		} else {
			endpoint.log.Debug("uploaded", zap.Stringer("Piece ID", limit.PieceId))
		}
	}()

	peer, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		return Error.Wrap(err)
	}

	pieceWriter, err := endpoint.store.Writer(ctx, limit.SatelliteId, limit.PieceId)
	if err != nil {
		return ErrInternal.Wrap(err) // TODO: report grpc status internal server error
	}
	defer func() {
		// cancel the piece write if it hasn't been committed
		if cancelErr := pieceWriter.Cancel(); cancelErr != nil {
			endpoint.log.Error("error during cancelling a piece write", zap.Error(cancelErr))
		}
	}()

	largestOrder := pb.Order2{}
	defer endpoint.SaveOrder(ctx, limit, &largestOrder, peer)

	for {
		message, err = stream.Recv() // TODO: reuse messages to avoid allocations
		if err == io.EOF {
			return ErrProtocol.New("unexpected EOF")
		} else if err != nil {
			return ErrProtocol.Wrap(err) // TODO: report grpc status bad message
		}
		if message == nil {
			return ErrProtocol.New("expected a message") // TODO: report grpc status bad message
		}

		switch {
		default:
			return ErrProtocol.New("message didn't contain any of order, chunk or done") // TODO: report grpc status bad message

		case message.Order != nil:
			if err := endpoint.VerifyOrder(ctx, peer, limit, message.Order, largestOrder.Amount); err != nil {
				return err
			}
			largestOrder = *message.Order

		case message.Chunk != nil:
			if message.Chunk.Offset != pieceWriter.Size() {
				return ErrProtocol.New("chunk out of order") // TODO: report grpc status bad message
			}

			if largestOrder.Amount < pieceWriter.Size()+int64(len(message.Chunk.Data)) {
				// TODO: should we write currently and give a chance for uplink to remedy the situation?
				return ErrProtocol.New("not enough allocated, allocated=%v writing=%v", largestOrder.Amount, pieceWriter.Size()+int64(len(message.Chunk.Data))) // TODO: report grpc status ?
			}

			if _, err := pieceWriter.Write(message.Chunk.Data); err != nil {
				return ErrInternal.Wrap(err) // TODO: report grpc status internal server error
			}

		case message.Done != nil:
			expectedHash := pieceWriter.Hash()
			if err := endpoint.VerifyPieceHash(ctx, peer, limit, message.Done, expectedHash); err != nil {
				return err // TODO: report grpc status internal server error
			}

			if err := pieceWriter.Commit(); err != nil {
				return ErrInternal.Wrap(err) // TODO: report grpc status internal server error
			}

			// TODO: do this in a goroutine
			{
				expiration, err := ptypes.Timestamp(limit.PieceExpiration)
				if err != nil {
					return ErrInternal.Wrap(err)
				}

				// TODO: maybe this should be as a pieceWriter.Commit(ctx, info)
				info := &pieces.Info{
					SatelliteID: limit.SatelliteId,

					PieceID:         limit.PieceId,
					PieceSize:       pieceWriter.Size(),
					PieceExpiration: expiration,

					UplinkPieceHash: message.Done,
					Uplink:          peer,
				}

				if err := endpoint.pieceinfo.Add(ctx, info); err != nil {
					return ErrInternal.Wrap(err)
				}
			}

			storageNodeHash, err := signing.SignPieceHash(endpoint.signer, &pb.PieceHash{
				PieceId: limit.PieceId,
				Hash:    expectedHash,
			})
			if err != nil {
				return ErrInternal.Wrap(err)
			}

			closeErr := stream.SendAndClose(&pb.PieceUploadResponse{
				Done: storageNodeHash,
			})
			return ErrProtocol.Wrap(ignoreEOF(closeErr))
		}
	}
}
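From the uplink's side, Upload is a client-streaming RPC: first the order limit, then interleaved orders and chunks, then the uplink's signed hash, answered by the node's signed hash. A hedged sketch of that message sequence, assuming the generated pb.PiecestoreClient and a nested pb.PieceUploadRequest_Chunk message mirroring the fields the handler above reads; the limit, order and uplinkSignedHash values are prepared and signed elsewhere, and error handling is elided:

// Sketch of the uplink's message sequence for Upload.
stream, _ := client.Upload(ctx)

// 1. the order limit must be the first message
_ = stream.Send(&pb.PieceUploadRequest{Limit: limit})

// 2. an order must allocate bandwidth before chunks may consume it
_ = stream.Send(&pb.PieceUploadRequest{Order: order})
_ = stream.Send(&pb.PieceUploadRequest{Chunk: &pb.PieceUploadRequest_Chunk{
	Offset: 0, // chunks must arrive in order, matching the writer's size
	Data:   data,
}})

// 3. the uplink's signed hash completes the upload ...
_ = stream.Send(&pb.PieceUploadRequest{Done: uplinkSignedHash})

// ... and the storage node answers with its own signed hash.
resp, _ := stream.CloseAndRecv()
_ = resp.Done // *pb.PieceHash signed by the storage node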
// Download implements downloading a piece from piece store.
func (endpoint *Endpoint) Download(stream pb.Piecestore_DownloadServer) (err error) {
	ctx := stream.Context()
	defer mon.Task()(&ctx)(&err)

	// TODO: set connection timeouts
	// TODO: set maximum message size

	var message *pb.PieceDownloadRequest

	// receive limit and chunk from uplink
	message, err = stream.Recv()
	if err != nil {
		return ErrProtocol.Wrap(err)
	}
	if message.Limit == nil || message.Chunk == nil {
		return ErrProtocol.New("expected order limit and chunk as the first message")
	}
	limit, chunk := message.Limit, message.Chunk

	if limit.Action != pb.PieceAction_GET && limit.Action != pb.PieceAction_GET_REPAIR && limit.Action != pb.PieceAction_GET_AUDIT {
		return ErrProtocol.New("expected get or get repair or audit action got %v", limit.Action) // TODO: report grpc status unauthorized or bad request
	}

	if chunk.ChunkSize > limit.Limit {
		return ErrProtocol.New("requested more than order limit allows, limit=%v requested=%v", limit.Limit, chunk.ChunkSize)
	}

	if err := endpoint.VerifyOrderLimit(ctx, limit); err != nil {
		return Error.Wrap(err) // TODO: report grpc status unauthorized or bad request
	}

	defer func() {
		if err != nil {
			endpoint.log.Debug("download failed", zap.Stringer("Piece ID", limit.PieceId), zap.Error(err))
		} else {
			endpoint.log.Debug("downloaded", zap.Stringer("Piece ID", limit.PieceId))
		}
	}()

	peer, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		return Error.Wrap(err)
	}

	pieceReader, err := endpoint.store.Reader(ctx, limit.SatelliteId, limit.PieceId)
	if err != nil {
		return ErrInternal.Wrap(err) // TODO: report grpc status internal server error
	}
	defer func() {
		err := pieceReader.Close() // similar to how transaction Rollback works
		if err != nil {
			// no reason to report this error to the uplink
			endpoint.log.Error("failed to close piece reader", zap.Error(err))
		}
	}()

	// TODO: verify chunk.Size behavior logic with regards to reading all
	if chunk.Offset+chunk.ChunkSize > pieceReader.Size() {
		return Error.New("requested more data than available, requesting=%v available=%v", chunk.Offset+chunk.ChunkSize, pieceReader.Size())
	}

	throttle := sync2.NewThrottle()
	// TODO: see whether this can be implemented without a goroutine

	group, ctx := errgroup.WithContext(ctx)
	group.Go(func() (err error) {
		var maximumChunkSize = 1 * memory.MiB.Int64()

		currentOffset := chunk.Offset
		unsentAmount := chunk.ChunkSize
		for unsentAmount > 0 {
			tryToSend := min(unsentAmount, maximumChunkSize)

			// TODO: add timeout here
			chunkSize, err := throttle.ConsumeOrWait(tryToSend)
			if err != nil {
				// this can happen only because uplink decided to close the connection
				return nil
			}

			chunkData := make([]byte, chunkSize)
			_, err = pieceReader.Seek(currentOffset, io.SeekStart)
			if err != nil {
				return ErrInternal.Wrap(err)
			}

			// io.ReadFull guards against short reads filling the chunk only partially
			_, err = io.ReadFull(pieceReader, chunkData)
			if err != nil {
				return ErrInternal.Wrap(err)
			}

			err = stream.Send(&pb.PieceDownloadResponse{
				Chunk: &pb.PieceDownloadResponse_Chunk{
					Offset: currentOffset,
					Data:   chunkData,
				},
			})
			if err != nil {
				// err is io.EOF when uplink asked for a piece, but decided not to retrieve it,
				// no need to propagate it
				return ErrProtocol.Wrap(ignoreEOF(err))
			}

			currentOffset += chunkSize
			unsentAmount -= chunkSize
		}

		return nil
	})

	recvErr := func() (err error) {
		largestOrder := pb.Order2{}
		defer endpoint.SaveOrder(ctx, limit, &largestOrder, peer)

		// ensure that we always terminate the sending goroutine
		defer throttle.Fail(io.EOF)

		for {
			// TODO: check errors
			// TODO: add timeout here
			message, err = stream.Recv()
			if err != nil {
				// err is io.EOF when uplink closed the connection, no need to return an error
				return ErrProtocol.Wrap(ignoreEOF(err))
			}

			if message == nil || message.Order == nil {
				return ErrProtocol.New("expected order as the message")
			}

			if err := endpoint.VerifyOrder(ctx, peer, limit, message.Order, largestOrder.Amount); err != nil {
				return err
			}
			if err := throttle.Produce(message.Order.Amount - largestOrder.Amount); err != nil {
				// shouldn't happen since only the receiving side calls Fail
				return ErrInternal.Wrap(err)
			}
			largestOrder = *message.Order
		}
	}()

	// ensure we wait for the sender to complete
	sendErr := group.Wait()
	return Error.Wrap(errs.Combine(sendErr, recvErr))
}
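Download's flow control rides on sync2.Throttle: the receive loop Produces allocation as each signed order arrives, and the send goroutine ConsumeOrWait-s before reading and sending a chunk, so the node never sends more than the uplink has committed to pay for. A toy fragment of that handshake, using only the Throttle methods referenced above; sendChunkOfSize is a hypothetical stand-in for the send step:

throttle := sync2.NewThrottle()

go func() {
	// receive side: each verified order grows the allocation
	_ = throttle.Produce(64 * memory.KiB.Int64())
	// on exit, unblock any waiting consumer
	throttle.Fail(io.EOF)
}()

// send side: blocks until some allocation is available, then
// returns how much may be sent (at most the requested amount)
allowed, err := throttle.ConsumeOrWait(1 * memory.MiB.Int64())
if err == nil {
	sendChunkOfSize(allowed) // hypothetical send step
}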
// SaveOrder saves the order with all necessary information. It assumes the order has already been verified.
func (endpoint *Endpoint) SaveOrder(ctx context.Context, limit *pb.OrderLimit2, order *pb.Order2, uplink *identity.PeerIdentity) {
	// TODO: do this in a goroutine
	if order == nil || order.Amount <= 0 {
		return
	}
	err := endpoint.orders.Enqueue(ctx, &orders.Info{
		Limit:  limit,
		Order:  order,
		Uplink: uplink,
	})
	if err != nil {
		endpoint.log.Error("failed to add order", zap.Error(err))
	} else {
		err := endpoint.usage.Add(ctx, limit.SatelliteId, limit.Action, order.Amount, time.Now())
		if err != nil {
			endpoint.log.Error("failed to add bandwidth usage", zap.Error(err))
		}
	}
}

// min returns the smaller of two values.
func min(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

// ignoreEOF ignores the io.EOF error.
func ignoreEOF(err error) error {
	if err == io.EOF {
		return nil
	}
	return err
}
storagenode/piecestore/endpoint_test.go (new file, 87 lines)
@ -0,0 +1,87 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package piecestore_test

import (
	"io"
	"math/rand"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/storagenode/bandwidth"
	"storj.io/storj/uplink/piecestore"
)

func TestUploadAndPartialDownload(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 6, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	expectedData := make([]byte, 100*memory.KiB)
	_, err = rand.Read(expectedData)
	require.NoError(t, err)

	err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test/bucket", "test/path", expectedData)
	assert.NoError(t, err)

	var totalDownload int64
	for _, tt := range []struct {
		offset, size int64
	}{
		{0, 1510},
		{1513, 1584},
		{13581, 4783},
	} {
		if piecestore.DefaultConfig.InitialStep < tt.size {
			t.Fatal("test expects initial step to be larger than size to download")
		}
		totalDownload += piecestore.DefaultConfig.InitialStep

		download, err := planet.Uplinks[0].DownloadStream(ctx, planet.Satellites[0], "test/bucket", "test/path")
		require.NoError(t, err)

		pos, err := download.Seek(tt.offset, io.SeekStart)
		require.NoError(t, err)
		assert.Equal(t, pos, tt.offset)

		data := make([]byte, tt.size)
		n, err := io.ReadFull(download, data)
		require.NoError(t, err)
		assert.Equal(t, int(tt.size), n)

		assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data)

		require.NoError(t, download.Close())
	}

	var totalBandwidthUsage bandwidth.Usage
	for _, storagenode := range planet.StorageNodes {
		usage, err := storagenode.DB.Bandwidth().Summary(ctx, time.Now().Add(-10*time.Hour), time.Now().Add(10*time.Hour))
		require.NoError(t, err)
		totalBandwidthUsage.Add(usage)
	}

	err = planet.Uplinks[0].Delete(ctx, planet.Satellites[0], "test/bucket", "test/path")
	require.NoError(t, err)
	_, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "test/bucket", "test/path")
	require.Error(t, err)

	// check rough limits for the upload and download
	totalUpload := int64(len(expectedData))
	t.Log(totalUpload, totalBandwidthUsage.Put, int64(len(planet.StorageNodes))*totalUpload)
	assert.True(t, totalUpload < totalBandwidthUsage.Put && totalBandwidthUsage.Put < int64(len(planet.StorageNodes))*totalUpload)
	t.Log(totalDownload, totalBandwidthUsage.Get, int64(len(planet.StorageNodes))*totalDownload)
	assert.True(t, totalBandwidthUsage.Get < int64(len(planet.StorageNodes))*totalDownload)
}
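The final assertions bound aggregate bandwidth rather than pin it exactly: with erasure coding, each node stores a piece smaller than the file, while all nodes together store more than one copy. A self-contained numeric illustration of why totalUpload < sum(Put) < nodes*totalUpload holds; the RS parameters here are assumptions for illustration only and may differ from the test configuration:

package main

import "fmt"

func main() {
	// Hypothetical erasure-coding parameters: k=4 data pieces, n=6 stored pieces.
	fileSize := int64(100 * 1024)
	k, n := int64(4), int64(6)

	pieceSize := fileSize / k // each node stores roughly 1/k of the file
	sumPut := pieceSize * n   // aggregate Put bandwidth across all nodes

	fmt.Println(fileSize < sumPut)   // true: expansion factor n/k > 1
	fmt.Println(sumPut < n*fileSize) // true: far below nodes*fileSize
}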
storagenode/piecestore/serials.go (new file, 27 lines)
@ -0,0 +1,27 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package piecestore

import (
	"context"
	"time"

	"storj.io/storj/pkg/storj"
)

// SerialNumberFn is the callback type used by IterateAll.
type SerialNumberFn func(satelliteID storj.NodeID, serialNumber []byte, expiration time.Time)

// UsedSerials is a persistent store for serial numbers.
// TODO: maybe this should be in orders.UsedSerials
type UsedSerials interface {
	// Add adds a serial to the database.
	Add(ctx context.Context, satelliteID storj.NodeID, serialNumber []byte, expiration time.Time) error
	// DeleteExpired deletes expired serial numbers.
	DeleteExpired(ctx context.Context, now time.Time) error

	// IterateAll iterates all serials.
	// Note, this will lock the database and should only be used during startup.
	IterateAll(ctx context.Context, fn SerialNumberFn) error
}
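For intuition, here is a hedged in-memory sketch satisfying the UsedSerials interface; the implementation this commit actually adds is SQLite-backed, and this toy version skips locking and persistence (it assumes the bytes and github.com/zeebo/errs imports and is not safe for concurrent use):

// Toy in-memory UsedSerials, illustrative only.
type usedSerial struct {
	satelliteID  storj.NodeID
	serialNumber []byte
	expiration   time.Time
}

type memSerials struct {
	serials []usedSerial
}

func (m *memSerials) Add(ctx context.Context, satelliteID storj.NodeID, serialNumber []byte, expiration time.Time) error {
	for _, s := range m.serials {
		if s.satelliteID == satelliteID && bytes.Equal(s.serialNumber, serialNumber) {
			return errs.New("serial already used") // duplicates must fail, see serials_test.go
		}
	}
	m.serials = append(m.serials, usedSerial{satelliteID, serialNumber, expiration})
	return nil
}

func (m *memSerials) DeleteExpired(ctx context.Context, now time.Time) error {
	kept := m.serials[:0]
	for _, s := range m.serials {
		if !s.expiration.Before(now) { // keep anything not yet expired
			kept = append(kept, s)
		}
	}
	m.serials = kept
	return nil
}

func (m *memSerials) IterateAll(ctx context.Context, fn SerialNumberFn) error {
	for _, s := range m.serials {
		fn(s.satelliteID, s.serialNumber, s.expiration)
	}
	return nil
}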
storagenode/piecestore/serials_test.go (new file, 106 lines)
@ -0,0 +1,106 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package piecestore_test

import (
	"math/rand"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)

func TestUsedSerials(t *testing.T) {
	storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()

		usedSerials := db.UsedSerials()

		node0 := testplanet.MustPregeneratedIdentity(0)
		node1 := testplanet.MustPregeneratedIdentity(1)

		serial1 := newRandomSerial()
		serial2 := newRandomSerial()
		serial3 := newRandomSerial()

		now := time.Now()

		// queries on an empty table
		err := usedSerials.DeleteExpired(ctx, now.Add(6*time.Minute))
		assert.NoError(t, err)

		err = usedSerials.IterateAll(ctx, func(satellite storj.NodeID, serialNumber []byte, expiration time.Time) {})
		assert.NoError(t, err)

		// let's start adding data
		type Serial struct {
			SatelliteID  storj.NodeID
			SerialNumber []byte
			Expiration   time.Time
		}

		serialNumbers := []Serial{
			{node0.ID, serial1, now.Add(time.Minute)},
			{node0.ID, serial2, now.Add(4 * time.Minute)},
			{node0.ID, serial3, now.Add(8 * time.Minute)},
			{node1.ID, serial1, now.Add(time.Minute)},
			{node1.ID, serial2, now.Add(4 * time.Minute)},
			{node1.ID, serial3, now.Add(8 * time.Minute)},
		}

		// basic adding
		for _, serial := range serialNumbers {
			err = usedSerials.Add(ctx, serial.SatelliteID, serial.SerialNumber, serial.Expiration)
			assert.NoError(t, err)
		}

		// duplicate adds should fail
		for _, serial := range serialNumbers {
			expirationDelta := time.Duration(rand.Intn(10)-5) * time.Hour
			err = usedSerials.Add(ctx, serial.SatelliteID, serial.SerialNumber, serial.Expiration.Add(expirationDelta))
			assert.Error(t, err)
		}

		// ensure we can list all of them
		listedNumbers := []Serial{}
		err = usedSerials.IterateAll(ctx, func(satellite storj.NodeID, serialNumber []byte, expiration time.Time) {
			listedNumbers = append(listedNumbers, Serial{satellite, serialNumber, expiration})
		})

		require.NoError(t, err)
		assert.Empty(t, cmp.Diff(serialNumbers, listedNumbers))

		// ensure we can delete expired
		err = usedSerials.DeleteExpired(ctx, now.Add(6*time.Minute))
		require.NoError(t, err)

		// ensure we can list after delete
		listedAfterDelete := []Serial{}
		err = usedSerials.IterateAll(ctx, func(satellite storj.NodeID, serialNumber []byte, expiration time.Time) {
			listedAfterDelete = append(listedAfterDelete, Serial{satellite, serialNumber, expiration})
		})

		// check that we have actually deleted things
		require.NoError(t, err)
		assert.Empty(t, cmp.Diff([]Serial{
			{node0.ID, serial3, now.Add(8 * time.Minute)},
			{node1.ID, serial3, now.Add(8 * time.Minute)},
		}, listedAfterDelete))
	})
}

func newRandomSerial() []byte {
	var serial [16]byte
	_, _ = rand.Read(serial[:])
	return serial[:]
}
storagenode/piecestore/verification.go (new file, 150 lines)
@ -0,0 +1,150 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package piecestore

import (
	"bytes"
	"context"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/zeebo/errs"

	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/pb"
)

var (
	// ErrVerifyNotAuthorized is returned when the one submitting the action is not authorized to perform that action.
	ErrVerifyNotAuthorized = errs.Class("not authorized")
	// ErrVerifyUntrusted is returned when the action is not trusted.
	ErrVerifyUntrusted = errs.Class("untrusted")
	// ErrVerifyDuplicateRequest is returned when the serial number has already been used to submit an action.
	ErrVerifyDuplicateRequest = errs.Class("duplicate request")
)

// VerifyOrderLimit verifies that the order limit is properly signed and has sane values.
// It also verifies that the serial number has not been used.
func (endpoint *Endpoint) VerifyOrderLimit(ctx context.Context, limit *pb.OrderLimit2) error {
	// sanity checks
	switch {
	case limit.Limit < 0:
		return ErrProtocol.New("order limit is negative")
	case endpoint.signer.ID() != limit.StorageNodeId:
		return ErrProtocol.New("order intended for other storagenode: %v", limit.StorageNodeId)
	case endpoint.IsExpired(limit.PieceExpiration):
		return ErrProtocol.New("piece expired: %v", limit.PieceExpiration)
	case endpoint.IsExpired(limit.OrderExpiration):
		return ErrProtocol.New("order expired: %v", limit.OrderExpiration)

	case limit.SatelliteId.IsZero():
		return ErrProtocol.New("missing satellite id")
	case limit.UplinkId.IsZero():
		return ErrProtocol.New("missing uplink id")
	case len(limit.SatelliteSignature) == 0:
		return ErrProtocol.New("satellite signature missing")
	}

	// only the uplink or the satellite may make the request
	// TODO: should this check be based on the action?
	// with macaroons we might not have either of them doing the action
	peer, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		return ErrVerifyNotAuthorized.Wrap(err)
	}
	if limit.UplinkId != peer.ID && limit.SatelliteId != peer.ID {
		return ErrVerifyNotAuthorized.New("uplink:%s satellite:%s sender %s", limit.UplinkId, limit.SatelliteId, peer.ID)
	}

	if err := endpoint.trust.VerifySatelliteID(ctx, limit.SatelliteId); err != nil {
		return ErrVerifyUntrusted.Wrap(err)
	}
	if err := endpoint.trust.VerifyUplinkID(ctx, limit.UplinkId); err != nil {
		return ErrVerifyUntrusted.Wrap(err)
	}

	if err := endpoint.VerifyOrderLimitSignature(ctx, limit); err != nil {
		return ErrVerifyUntrusted.Wrap(err)
	}

	// TODO: use min of piece and order expiration instead
	serialExpiration, err := ptypes.Timestamp(limit.OrderExpiration)
	if err != nil {
		return ErrInternal.Wrap(err)
	}
	if err := endpoint.usedSerials.Add(ctx, limit.SatelliteId, limit.SerialNumber, serialExpiration); err != nil {
		return ErrVerifyDuplicateRequest.Wrap(err)
	}

	return nil
}

// VerifyOrder verifies that the order corresponds to the order limit and has all the necessary fields.
func (endpoint *Endpoint) VerifyOrder(ctx context.Context, peer *identity.PeerIdentity, limit *pb.OrderLimit2, order *pb.Order2, largestOrderAmount int64) error {
	if !bytes.Equal(order.SerialNumber, limit.SerialNumber) {
		return ErrProtocol.New("order serial number changed during upload") // TODO: report grpc status bad message
	}
	// TODO: add check for minimum allocation step
	if order.Amount < largestOrderAmount {
		return ErrProtocol.New("order contained smaller amount=%v, previous=%v", order.Amount, largestOrderAmount) // TODO: report grpc status bad message
	}
	if order.Amount > limit.Limit {
		return ErrProtocol.New("order exceeded allowed amount=%v, limit=%v", order.Amount, limit.Limit) // TODO: report grpc status bad message
	}

	if err := signing.VerifyOrderSignature(signing.SigneeFromPeerIdentity(peer), order); err != nil {
		return ErrVerifyUntrusted.New("invalid order signature") // TODO: report grpc status bad message
	}

	return nil
}

// VerifyPieceHash verifies whether the piece hash is properly signed and matches the locally computed hash.
func (endpoint *Endpoint) VerifyPieceHash(ctx context.Context, peer *identity.PeerIdentity, limit *pb.OrderLimit2, hash *pb.PieceHash, expectedHash []byte) error {
	if peer == nil || limit == nil || hash == nil || len(expectedHash) == 0 {
		return ErrProtocol.New("invalid arguments")
	}
	if limit.PieceId != hash.PieceId {
		return ErrProtocol.New("piece id changed") // TODO: report grpc status bad message
	}
	if !bytes.Equal(hash.Hash, expectedHash) {
		return ErrProtocol.New("hashes don't match") // TODO: report grpc status bad message
	}

	if err := signing.VerifyPieceHashSignature(signing.SigneeFromPeerIdentity(peer), hash); err != nil {
		return ErrVerifyUntrusted.New("invalid hash signature: %v", err) // TODO: report grpc status bad message
	}

	return nil
}

// VerifyOrderLimitSignature verifies that the order limit signature is valid.
func (endpoint *Endpoint) VerifyOrderLimitSignature(ctx context.Context, limit *pb.OrderLimit2) error {
	signee, err := endpoint.trust.GetSignee(ctx, limit.SatelliteId)
	if err != nil {
		return ErrVerifyUntrusted.New("unable to get signee: %v", err) // TODO: report grpc status bad message
	}

	if err := signing.VerifyOrderLimitSignature(signee, limit); err != nil {
		return ErrVerifyUntrusted.New("invalid order limit signature: %v", err) // TODO: report grpc status bad message
	}

	return nil
}

// IsExpired checks whether the date has already expired (with a threshold) at the time of calling this function.
func (endpoint *Endpoint) IsExpired(expiration *timestamp.Timestamp) bool {
	if expiration == nil {
		return true
	}

	expirationTime, err := ptypes.Timestamp(expiration)
	if err != nil {
		// TODO: return error
		return true
	}

	// TODO: return specific error about either exceeding the expiration completely or just the grace period

	// expired only once the expiration time is more than the grace period in the past
	return expirationTime.Before(time.Now().Add(-endpoint.config.ExpirationGracePeriod))
}
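A quick worked example of the grace-period arithmetic in IsExpired, self-contained and using the 48h default; the concrete offsets are illustrative:

package main

import (
	"fmt"
	"time"
)

func main() {
	grace := 48 * time.Hour // default ExpirationGracePeriod
	now := time.Now()
	isExpired := func(expiration time.Time) bool {
		return expiration.Before(now.Add(-grace))
	}

	fmt.Println(isExpired(now.Add(24 * time.Hour)))  // false: not yet expired
	fmt.Println(isExpired(now.Add(-24 * time.Hour))) // false: expired, but within the grace period
	fmt.Println(isExpired(now.Add(-72 * time.Hour))) // true: beyond the grace period
}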
storagenode/storagenodedb/bandwidthdb.go (new file, 109 lines)
@ -0,0 +1,109 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package storagenodedb

import (
	"context"
	"database/sql"
	"time"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode/bandwidth"
)

type bandwidthdb struct{ *infodb }

// Bandwidth returns the table for storing bandwidth usage.
func (db *DB) Bandwidth() bandwidth.DB { return db.info.Bandwidth() }

// Bandwidth returns the table for storing bandwidth usage.
func (db *infodb) Bandwidth() bandwidth.DB { return &bandwidthdb{db} }

// Add adds bandwidth usage to the table.
func (db *bandwidthdb) Add(ctx context.Context, satelliteID storj.NodeID, action pb.PieceAction, amount int64, created time.Time) error {
	defer db.locked()()

	_, err := db.db.Exec(`
		INSERT INTO
			bandwidth_usage(satellite_id, action, amount, created_at)
		VALUES(?, ?, ?, ?)`, satelliteID, action, amount, created)

	return ErrInfo.Wrap(err)
}

// Summary returns a summary of bandwidth usage for the given time range.
func (db *bandwidthdb) Summary(ctx context.Context, from, to time.Time) (_ *bandwidth.Usage, err error) {
	defer db.locked()()

	usage := &bandwidth.Usage{}

	rows, err := db.db.Query(`
		SELECT action, sum(amount)
		FROM bandwidth_usage
		WHERE ? <= created_at AND created_at <= ?
		GROUP BY action`, from, to)
	if err != nil {
		if err == sql.ErrNoRows {
			return usage, nil
		}
		return nil, ErrInfo.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	for rows.Next() {
		var action pb.PieceAction
		var amount int64
		err := rows.Scan(&action, &amount)
		if err != nil {
			return nil, ErrInfo.Wrap(err)
		}
		usage.Include(action, amount)
	}

	return usage, ErrInfo.Wrap(rows.Err())
}

// SummaryBySatellite returns a summary of bandwidth usage grouped by satellite.
func (db *bandwidthdb) SummaryBySatellite(ctx context.Context, from, to time.Time) (_ map[storj.NodeID]*bandwidth.Usage, err error) {
	defer db.locked()()

	entries := map[storj.NodeID]*bandwidth.Usage{}

	rows, err := db.db.Query(`
		SELECT satellite_id, action, sum(amount)
		FROM bandwidth_usage
		WHERE ? <= created_at AND created_at <= ?
		GROUP BY satellite_id, action`, from, to)
	if err != nil {
		if err == sql.ErrNoRows {
			return entries, nil
		}
		return nil, ErrInfo.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	for rows.Next() {
		var satelliteID storj.NodeID
		var action pb.PieceAction
		var amount int64

		err := rows.Scan(&satelliteID, &action, &amount)
		if err != nil {
			return nil, ErrInfo.Wrap(err)
		}

		entry, ok := entries[satelliteID]
		if !ok {
			entry = &bandwidth.Usage{}
			entries[satelliteID] = entry
		}

		entry.Include(action, amount)
	}

	return entries, ErrInfo.Wrap(rows.Err())
}
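A hedged usage sketch for these queries, assuming db is the storagenode.DB seen in the tests above and that bandwidth.Usage exposes the Put and Get counters that endpoint_test.go reads; error handling is elided:

// Sketch: querying the last 24h of bandwidth usage.
from, to := time.Now().Add(-24*time.Hour), time.Now()

usage, _ := db.Bandwidth().Summary(ctx, from, to)
fmt.Println("put:", usage.Put, "get:", usage.Get)

perSatellite, _ := db.Bandwidth().SummaryBySatellite(ctx, from, to)
for satelliteID, u := range perSatellite {
	fmt.Println(satelliteID, u.Put, u.Get)
}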