protocol: implement new piece signing and verification (#2525)

This commit is contained in:
Egon Elbre 2019-07-11 16:51:40 -04:00 committed by GitHub
parent 8b507f3d73
commit d52f764e54
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
42 changed files with 966 additions and 524 deletions

1
go.sum
View File

@ -404,6 +404,7 @@ golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

View File

@ -17,7 +17,6 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/encryption"
@ -160,8 +159,7 @@ func (uplink *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey
// DialPiecestore dials destination storagenode and returns a piecestore client.
func (uplink *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*piecestore.Client, error) {
node := destination.Local()
signer := signing.SignerFromFullIdentity(uplink.Transport.Identity())
return piecestore.Dial(ctx, uplink.Transport, &node.Node, uplink.Log.Named("uplink>piecestore"), signer, piecestore.DefaultConfig)
return piecestore.Dial(ctx, uplink.Transport, &node.Node, uplink.Log.Named("uplink>piecestore"), piecestore.DefaultConfig)
}
// Upload data to specific satellite

View File

@ -148,7 +148,7 @@ func TestDisqualifiedNodesGetNoDownload(t *testing.T) {
disqualifiedNode := pointer.GetRemote().GetRemotePieces()[0].NodeId
disqualifyNode(t, ctx, satellite, disqualifiedNode)
limits, err := satellite.Orders.Service.CreateGetOrderLimits(ctx, upl.Identity.PeerIdentity(), bucketID, pointer)
limits, _, err := satellite.Orders.Service.CreateGetOrderLimits(ctx, bucketID, pointer)
require.NoError(t, err)
assert.Len(t, limits, len(pointer.GetRemote().GetRemotePieces())-1)

View File

@ -59,10 +59,10 @@ func TestReverifySuccess(t *testing.T) {
pieces := stripe.Segment.GetRemote().GetRemotePieces()
rootPieceID := stripe.Segment.GetRemote().RootPieceId
limit, err := orders.CreateAuditOrderLimit(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, pieces[0].NodeId, pieces[0].PieceNum, rootPieceID, shareSize)
limit, privateKey, err := orders.CreateAuditOrderLimit(ctx, bucketID, pieces[0].NodeId, pieces[0].PieceNum, rootPieceID, shareSize)
require.NoError(t, err)
share, err := audits.Verifier.GetShare(ctx, limit, stripe.Index, shareSize, int(pieces[0].PieceNum))
share, err := audits.Verifier.GetShare(ctx, limit, privateKey, stripe.Index, shareSize, int(pieces[0].PieceNum))
require.NoError(t, err)
pending := &audit.PendingAudit{
@ -126,10 +126,10 @@ func TestReverifyFailMissingShare(t *testing.T) {
pieces := stripe.Segment.GetRemote().GetRemotePieces()
rootPieceID := stripe.Segment.GetRemote().RootPieceId
limit, err := orders.CreateAuditOrderLimit(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, pieces[0].NodeId, pieces[0].PieceNum, rootPieceID, shareSize)
limit, privateKey, err := orders.CreateAuditOrderLimit(ctx, bucketID, pieces[0].NodeId, pieces[0].PieceNum, rootPieceID, shareSize)
require.NoError(t, err)
share, err := audits.Verifier.GetShare(ctx, limit, stripe.Index, shareSize, int(pieces[0].PieceNum))
share, err := audits.Verifier.GetShare(ctx, limit, privateKey, stripe.Index, shareSize, int(pieces[0].PieceNum))
require.NoError(t, err)
pending := &audit.PendingAudit{

View File

@ -17,7 +17,6 @@ import (
"storj.io/storj/internal/errs2"
"storj.io/storj/internal/memory"
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
@ -88,7 +87,7 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
containedNodes := make(map[int]storj.NodeID)
sharesToAudit := make(map[int]Share)
orderLimits, err := verifier.orders.CreateAuditOrderLimits(ctx, verifier.auditor, bucketID, pointer, skip)
orderLimits, privateKey, err := verifier.orders.CreateAuditOrderLimits(ctx, bucketID, pointer, skip)
if err != nil {
return nil, err
}
@ -99,7 +98,7 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
verifier.log.Debug("Verify: order limits not created for some nodes (offline/disqualified)", zap.Strings("Node IDs", offlineNodes.Strings()))
}
shares, err := verifier.DownloadShares(ctx, orderLimits, stripe.Index, shareSize)
shares, err := verifier.DownloadShares(ctx, orderLimits, privateKey, stripe.Index, shareSize)
if err != nil {
return &Report{
Offlines: offlineNodes,
@ -235,7 +234,7 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
}
// DownloadShares downloads shares from the nodes where remote pieces are located
func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, stripeIndex int64, shareSize int32) (shares map[int]Share, err error) {
func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32) (shares map[int]Share, err error) {
defer mon.Task()(&ctx)(&err)
shares = make(map[int]Share, len(limits))
@ -248,7 +247,7 @@ func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.Addre
}
go func(i int, limit *pb.AddressedOrderLimit) {
share, err := verifier.GetShare(ctx, limit, stripeIndex, shareSize, i)
share, err := verifier.GetShare(ctx, limit, piecePrivateKey, stripeIndex, shareSize, i)
if err != nil {
share = Share{
Error: err,
@ -310,7 +309,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
containedInSegment++
go func(pending *PendingAudit, piece *pb.RemotePiece) {
limit, err := verifier.orders.CreateAuditOrderLimit(ctx, verifier.auditor, createBucketID(stripe.SegmentPath), pending.NodeID, piece.PieceNum, pending.PieceID, pending.ShareSize)
limit, piecePrivateKey, err := verifier.orders.CreateAuditOrderLimit(ctx, createBucketID(stripe.SegmentPath), pending.NodeID, piece.PieceNum, pending.PieceID, pending.ShareSize)
if err != nil {
if overlay.ErrNodeDisqualified.Has(err) {
_, errDelete := verifier.containment.Delete(ctx, piece.NodeId)
@ -332,7 +331,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
return
}
share, err := verifier.GetShare(ctx, limit, pending.StripeIndex, pending.ShareSize, int(piece.PieceNum))
share, err := verifier.GetShare(ctx, limit, piecePrivateKey, pending.StripeIndex, pending.ShareSize, int(piece.PieceNum))
// check if the pending audit was deleted while downloading the share
_, getErr := verifier.containment.Get(ctx, piece.NodeId)
@ -434,7 +433,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
}
// GetShare use piece store client to download shares from nodes
func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrderLimit, stripeIndex int64, shareSize int32, pieceNum int) (share Share, err error) {
func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32, pieceNum int) (share Share, err error) {
defer mon.Task()(&ctx)(&err)
bandwidthMsgSize := shareSize
@ -454,9 +453,8 @@ func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrder
storageNodeID := limit.GetLimit().StorageNodeId
log := verifier.log.Named(storageNodeID.String())
target := &pb.Node{Id: storageNodeID, Address: limit.GetStorageNodeAddress()}
signer := signing.SignerFromFullIdentity(verifier.transport.Identity())
ps, err := piecestore.Dial(timedCtx, verifier.transport, target, log, signer, piecestore.DefaultConfig)
ps, err := piecestore.Dial(timedCtx, verifier.transport, target, log, piecestore.DefaultConfig)
if err != nil {
return Share{}, Error.Wrap(err)
}
@ -469,7 +467,7 @@ func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrder
offset := int64(shareSize) * stripeIndex
downloader, err := ps.Download(timedCtx, limit.GetLimit(), offset, int64(shareSize))
downloader, err := ps.Download(timedCtx, limit.GetLimit(), piecePrivateKey, offset, int64(shareSize))
if err != nil {
return Share{}, err
}

View File

@ -53,10 +53,10 @@ func TestDownloadSharesHappyPath(t *testing.T) {
require.NoError(t, err)
shareSize := stripe.Segment.GetRemote().GetRedundancy().GetErasureShareSize()
limits, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, stripe.Segment, nil)
limits, privateKey, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, bucketID, stripe.Segment, nil)
require.NoError(t, err)
shares, err := audits.Verifier.DownloadShares(ctx, limits, stripe.Index, shareSize)
shares, err := audits.Verifier.DownloadShares(ctx, limits, privateKey, stripe.Index, shareSize)
require.NoError(t, err)
for _, share := range shares {
@ -97,7 +97,7 @@ func TestDownloadSharesOfflineNode(t *testing.T) {
require.NoError(t, err)
shareSize := stripe.Segment.GetRemote().GetRedundancy().GetErasureShareSize()
limits, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, stripe.Segment, nil)
limits, privateKey, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, bucketID, stripe.Segment, nil)
require.NoError(t, err)
// stop the first node in the pointer
@ -105,7 +105,7 @@ func TestDownloadSharesOfflineNode(t *testing.T) {
err = stopStorageNode(ctx, planet, stoppedNodeID)
require.NoError(t, err)
shares, err := audits.Verifier.DownloadShares(ctx, limits, stripe.Index, shareSize)
shares, err := audits.Verifier.DownloadShares(ctx, limits, privateKey, stripe.Index, shareSize)
require.NoError(t, err)
for _, share := range shares {
@ -153,10 +153,10 @@ func TestDownloadSharesMissingPiece(t *testing.T) {
stripe.Segment.GetRemote().RootPieceId = storj.NewPieceID()
shareSize := stripe.Segment.GetRemote().GetRedundancy().GetErasureShareSize()
limits, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, stripe.Segment, nil)
limits, privateKey, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, bucketID, stripe.Segment, nil)
require.NoError(t, err)
shares, err := audits.Verifier.DownloadShares(ctx, limits, stripe.Index, shareSize)
shares, err := audits.Verifier.DownloadShares(ctx, limits, privateKey, stripe.Index, shareSize)
require.NoError(t, err)
for _, share := range shares {
@ -227,10 +227,10 @@ func TestDownloadSharesDialTimeout(t *testing.T) {
5*time.Second)
shareSize := stripe.Segment.GetRemote().GetRedundancy().GetErasureShareSize()
limits, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, stripe.Segment, nil)
limits, privateKey, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, bucketID, stripe.Segment, nil)
require.NoError(t, err)
shares, err := verifier.DownloadShares(ctx, limits, stripe.Index, shareSize)
shares, err := verifier.DownloadShares(ctx, limits, privateKey, stripe.Index, shareSize)
require.NoError(t, err)
for _, share := range shares {
@ -292,14 +292,14 @@ func TestDownloadSharesDownloadTimeout(t *testing.T) {
150*time.Millisecond)
shareSize := stripe.Segment.GetRemote().GetRedundancy().GetErasureShareSize()
limits, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, planet.Satellites[0].Identity.PeerIdentity(), bucketID, stripe.Segment, nil)
limits, privateKey, err := planet.Satellites[0].Orders.Service.CreateAuditOrderLimits(ctx, bucketID, stripe.Segment, nil)
require.NoError(t, err)
// make downloads on storage node slower than the timeout on the satellite for downloading shares
delay := 200 * time.Millisecond
storageNodeDB.SetLatency(delay)
shares, err := verifier.DownloadShares(ctx, limits, stripe.Index, shareSize)
shares, err := verifier.DownloadShares(ctx, limits, privateKey, stripe.Index, shareSize)
require.NoError(t, err)
require.Len(t, shares, 1)

View File

@ -0,0 +1,39 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package signing_test
import (
"encoding/hex"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
"storj.io/storj/pkg/pb"
)
func TestOrderSerialization(t *testing.T) {
t.Skip("broken")
orderLimitBytes, err := hex.DecodeString(`0A1027C6C39653A24B94BA560A7951698FF312209BD465AB990C1E62C7B99FEE63E71761FF1A7ECD951D502CE95F4A41D4C91A001A209BD465AB990C1E62C7B99FEE63E71761FF1A7ECD951D502CE95F4A41D4C91A0022209A27D4F09F85609E85B861B11F95C785899DC394FEC6BD4E303C502C3B7E2B002A20A86125ACD1B98E7262F9D38D9B27204DAF4E44092B0FBA786474B4754D45753330800838034A0C08B1E1A2EA0510AEF4AED70352463044022035EE84CAE8FE8CEBA52B2C1BD7A3891FA049557D5C4DE6BDEDAF5C92E2D004FA0220170DA89541EF962538763B0B55FDD04F14A623E118F55601FD8FA7DF266A374F`)
require.NoError(t, err)
orderBytes, err := hex.DecodeString(`0A1027C6C39653A24B94BA560A7951698FF31080081A473045022100BB7A53C2835BF5CAC59479C7A3A17447AC9D3DAE894B20849FDDF9E3533F173202207910685EB70107BFF73A2F94AF345369E51B35208941EB5CE903E48EFFB41642`)
require.NoError(t, err)
orderLimit := pb.OrderLimit{}
err = proto.Unmarshal(orderLimitBytes, &orderLimit)
require.NoError(t, err)
orderLimitMarshaled, err := proto.Marshal(&orderLimit)
require.NoError(t, err)
require.Equal(t, orderLimitBytes, orderLimitMarshaled, "order limit marshaling changed")
order := pb.Order{}
err = proto.Unmarshal(orderBytes, &order)
require.NoError(t, err)
orderBytesMarshaled, err := proto.Marshal(&order)
require.NoError(t, err)
require.Equal(t, orderBytes, orderBytesMarshaled, "order marshaling changed")
}

View File

@ -40,9 +40,9 @@ func SignOrderLimit(ctx context.Context, satellite Signer, unsigned *pb.OrderLim
return &signed, nil
}
// SignOrder signs the order using the specified signer.
// SignUplinkOrder signs the order using the specified signer.
// Signer is an uplink.
func SignOrder(ctx context.Context, uplink Signer, unsigned *pb.Order) (_ *pb.Order, err error) {
func SignUplinkOrder(ctx context.Context, privateKey storj.PiecePrivateKey, unsigned *pb.Order) (_ *pb.Order, err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeOrder(ctx, unsigned)
if err != nil {
@ -50,11 +50,10 @@ func SignOrder(ctx context.Context, uplink Signer, unsigned *pb.Order) (_ *pb.Or
}
signed := *unsigned
signed.UplinkSignature, err = uplink.HashAndSign(ctx, bytes)
signed.UplinkSignature, err = privateKey.Sign(bytes)
if err != nil {
return nil, Error.Wrap(err)
}
return &signed, nil
}
@ -76,6 +75,23 @@ func SignPieceHash(ctx context.Context, signer Signer, unsigned *pb.PieceHash) (
return &signed, nil
}
// SignUplinkPieceHash signs the piece hash using the specified signer.
// Signer is either uplink or storage node.
func SignUplinkPieceHash(ctx context.Context, privateKey storj.PiecePrivateKey, unsigned *pb.PieceHash) (_ *pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodePieceHash(ctx, unsigned)
if err != nil {
return nil, Error.Wrap(err)
}
signed := *unsigned
signed.Signature, err = privateKey.Sign(bytes)
if err != nil {
return nil, Error.Wrap(err)
}
return &signed, nil
}
// SignVoucher signs the voucher using the specified signer
// Signer is a satellite
func SignVoucher(ctx context.Context, signer Signer, unsigned *pb.Voucher) (_ *pb.Voucher, err error) {

View File

@ -16,7 +16,7 @@ type Signee interface {
HashAndVerifySignature(ctx context.Context, data, signature []byte) error
}
// VerifyOrderLimitSignature verifies that the signature inside order limit belongs to the satellite.
// VerifyOrderLimitSignature verifies that the signature inside order limit is valid and belongs to the satellite.
func VerifyOrderLimitSignature(ctx context.Context, satellite Signee, signed *pb.OrderLimit) (err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeOrderLimit(ctx, signed)
@ -27,7 +27,7 @@ func VerifyOrderLimitSignature(ctx context.Context, satellite Signee, signed *pb
return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature)
}
// VerifyOrderSignature verifies that the signature inside order belongs to the uplink.
// VerifyOrderSignature verifies that the signature inside order is valid and belongs to the uplink.
func VerifyOrderSignature(ctx context.Context, uplink Signee, signed *pb.Order) (err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeOrder(ctx, signed)
@ -38,7 +38,18 @@ func VerifyOrderSignature(ctx context.Context, uplink Signee, signed *pb.Order)
return uplink.HashAndVerifySignature(ctx, bytes, signed.UplinkSignature)
}
// VerifyPieceHashSignature verifies that the signature inside piece hash belongs to the signer, which is either uplink or storage node.
// VerifyUplinkOrderSignature verifies that the signature inside order is valid and belongs to the uplink.
func VerifyUplinkOrderSignature(ctx context.Context, publicKey storj.PiecePublicKey, signed *pb.Order) (err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeOrder(ctx, signed)
if err != nil {
return Error.Wrap(err)
}
return Error.Wrap(publicKey.Verify(bytes, signed.UplinkSignature))
}
// VerifyPieceHashSignature verifies that the signature inside piece hash is valid and belongs to the signer, which is either uplink or storage node.
func VerifyPieceHashSignature(ctx context.Context, signee Signee, signed *pb.PieceHash) (err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodePieceHash(ctx, signed)
@ -49,7 +60,19 @@ func VerifyPieceHashSignature(ctx context.Context, signee Signee, signed *pb.Pie
return signee.HashAndVerifySignature(ctx, bytes, signed.Signature)
}
// VerifyVoucher verifies that the signature inside voucher belongs to the satellite
// VerifyUplinkPieceHashSignature verifies that the signature inside piece hash is valid and belongs to the signer, which is either uplink or storage node.
func VerifyUplinkPieceHashSignature(ctx context.Context, publicKey storj.PiecePublicKey, signed *pb.PieceHash) (err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodePieceHash(ctx, signed)
if err != nil {
return Error.Wrap(err)
}
return Error.Wrap(publicKey.Verify(bytes, signed.Signature))
}
// VerifyVoucher verifies that the signature inside voucher is valid and belongs to the satellite
func VerifyVoucher(ctx context.Context, satellite Signee, signed *pb.Voucher) (err error) {
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeVoucher(ctx, signed)

View File

@ -713,6 +713,7 @@ func (m *SegmentWriteRequestOld) GetExpiration() time.Time {
type SegmentWriteResponseOld struct {
AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,1,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"`
RootPieceId PieceID `protobuf:"bytes,2,opt,name=root_piece_id,json=rootPieceId,proto3,customtype=PieceID" json:"root_piece_id"`
PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -914,6 +915,7 @@ func (m *SegmentDownloadRequestOld) GetSegment() int64 {
type SegmentDownloadResponseOld struct {
AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,1,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"`
Pointer *Pointer `protobuf:"bytes,2,opt,name=pointer,proto3" json:"pointer,omitempty"`
PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1105,6 +1107,7 @@ func (m *SegmentDeleteRequestOld) GetSegment() int64 {
type SegmentDeleteResponseOld struct {
AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,1,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"`
PrivateKey PiecePrivateKey `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1507,95 +1510,98 @@ func init() {
func init() { proto.RegisterFile("metainfo.proto", fileDescriptor_631e2f30a93cd64e) }
var fileDescriptor_631e2f30a93cd64e = []byte{
// 1404 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4b, 0x6f, 0x1b, 0x55,
0x14, 0x66, 0x9c, 0xc4, 0xb1, 0x8f, 0xd3, 0x24, 0xbd, 0x0d, 0x89, 0x3b, 0x6e, 0x1a, 0x77, 0x4a,
0x2b, 0x23, 0x21, 0x17, 0xa5, 0x9b, 0x4a, 0x05, 0x89, 0x3c, 0xfa, 0x08, 0xea, 0x23, 0x1a, 0x23,
0x5a, 0x2a, 0xd0, 0x68, 0xec, 0x39, 0x76, 0x07, 0x3c, 0x0f, 0xee, 0x5c, 0x43, 0xda, 0x35, 0x3f,
0x80, 0x05, 0x0b, 0x56, 0xfc, 0x09, 0xfe, 0x04, 0x62, 0xc9, 0x12, 0xa4, 0xb2, 0xe3, 0x4f, 0xb0,
0x41, 0xf7, 0xe5, 0x99, 0xb1, 0xc7, 0x49, 0x5a, 0xa5, 0x62, 0x37, 0xf7, 0xdc, 0x73, 0xcf, 0x3d,
0xe7, 0xfb, 0xce, 0xe3, 0x0e, 0x2c, 0x07, 0xc8, 0x5c, 0x3f, 0xec, 0x47, 0xed, 0x98, 0x46, 0x2c,
0x22, 0x15, 0xbd, 0x36, 0x57, 0x31, 0xec, 0xd1, 0x17, 0x31, 0xf3, 0xa3, 0x50, 0xee, 0x99, 0x30,
0x88, 0x06, 0x4a, 0xcf, 0xdc, 0x1a, 0x44, 0xd1, 0x60, 0x88, 0x37, 0xc4, 0xaa, 0x3b, 0xea, 0xdf,
0x60, 0x7e, 0x80, 0x09, 0x73, 0x83, 0x58, 0x2b, 0x87, 0x91, 0x87, 0xea, 0x7b, 0x25, 0x8e, 0xfc,
0x90, 0x21, 0xf5, 0xba, 0x4a, 0xb0, 0x14, 0x51, 0x0f, 0x69, 0x22, 0x57, 0xd6, 0xaf, 0x73, 0x50,
0xde, 0x1d, 0xf5, 0xbe, 0x41, 0x46, 0x08, 0xcc, 0x87, 0x6e, 0x80, 0x75, 0xa3, 0x69, 0xb4, 0x96,
0x6c, 0xf1, 0x4d, 0x6e, 0x41, 0x2d, 0x76, 0xd9, 0x73, 0xa7, 0xe7, 0xc7, 0xcf, 0x91, 0xd6, 0x4b,
0x4d, 0xa3, 0xb5, 0xbc, 0xbd, 0xd1, 0xce, 0xb8, 0xb7, 0x27, 0x76, 0x3a, 0x23, 0x9f, 0xa1, 0x0d,
0x5c, 0x57, 0x0a, 0xc8, 0x35, 0x58, 0x76, 0x19, 0xa3, 0x7e, 0x77, 0xc4, 0xd5, 0x1c, 0xdf, 0xab,
0xcf, 0x09, 0xbb, 0xe7, 0x32, 0xd2, 0x03, 0x8f, 0xec, 0x01, 0xf4, 0x28, 0xba, 0x0c, 0x3d, 0xc7,
0x65, 0xf5, 0xf9, 0xa6, 0xd1, 0xaa, 0x6d, 0x9b, 0x6d, 0x19, 0x60, 0x5b, 0x07, 0xd8, 0xfe, 0x4c,
0x07, 0xb8, 0x5b, 0xf9, 0xed, 0xd5, 0xd6, 0x3b, 0x3f, 0xfe, 0xbd, 0x65, 0xd8, 0x55, 0x75, 0x6e,
0x87, 0x91, 0x0f, 0x61, 0xcd, 0xc3, 0xbe, 0x3b, 0x1a, 0x32, 0x27, 0xc1, 0x41, 0x80, 0x21, 0x73,
0x12, 0xff, 0x25, 0xd6, 0x17, 0x9a, 0x46, 0x6b, 0xce, 0x26, 0x6a, 0xaf, 0x23, 0xb7, 0x3a, 0xfe,
0x4b, 0x24, 0x4f, 0xe0, 0xa2, 0x3e, 0x41, 0xd1, 0x1b, 0x85, 0x9e, 0x1b, 0xf6, 0x5e, 0x38, 0x49,
0xef, 0x39, 0x06, 0x58, 0x2f, 0x0b, 0x2f, 0x1a, 0xed, 0x14, 0x39, 0x7b, 0xac, 0xd3, 0x11, 0x2a,
0xf6, 0x86, 0x3a, 0x3d, 0xb9, 0x41, 0x3c, 0xd8, 0xd4, 0x86, 0x53, 0x90, 0x9c, 0xd8, 0xa5, 0x6e,
0x80, 0x0c, 0x69, 0x52, 0x5f, 0x14, 0xc6, 0x9b, 0x59, 0x08, 0xef, 0x8c, 0x3f, 0x0f, 0xc7, 0x7a,
0x76, 0x43, 0x99, 0x29, 0xda, 0xb4, 0x7c, 0x58, 0x96, 0xa4, 0x3d, 0xf0, 0x13, 0x76, 0xc0, 0x30,
0x28, 0x24, 0x2f, 0x8f, 0x6d, 0xe9, 0x8d, 0xb0, 0xb5, 0xfe, 0x2d, 0xc1, 0x05, 0x79, 0xd7, 0x9e,
0x90, 0xd9, 0xf8, 0xed, 0x08, 0x93, 0xff, 0x29, 0x5b, 0x66, 0x11, 0x3d, 0xff, 0x66, 0x44, 0x2f,
0xbc, 0x4d, 0xa2, 0xcb, 0x67, 0x41, 0xf4, 0x27, 0xb0, 0x96, 0x07, 0x3f, 0x89, 0xa3, 0x30, 0x41,
0xd2, 0x82, 0x72, 0x57, 0xc8, 0x05, 0xfe, 0xb5, 0xed, 0xd5, 0xf6, 0xb8, 0x97, 0x48, 0x7d, 0x5b,
0xed, 0x5b, 0xd7, 0x61, 0x55, 0x4a, 0xee, 0x21, 0x3b, 0x86, 0x3b, 0xeb, 0x63, 0x38, 0x9f, 0xd1,
0x7b, 0xed, 0x6b, 0xde, 0xd7, 0x59, 0xb2, 0x8f, 0x43, 0x3c, 0x36, 0x4b, 0xac, 0x75, 0x1d, 0x93,
0x56, 0x95, 0x97, 0x59, 0x3b, 0xda, 0x03, 0x9e, 0xd4, 0xda, 0xc0, 0x3a, 0x94, 0x7b, 0x23, 0x9a,
0x44, 0x54, 0x99, 0x50, 0x2b, 0xb2, 0x06, 0x0b, 0x43, 0x3f, 0xf0, 0x65, 0x5a, 0x2f, 0xd8, 0x72,
0x61, 0x3d, 0x05, 0x92, 0x35, 0xa1, 0xa2, 0x68, 0xc3, 0x82, 0xcf, 0x30, 0x48, 0xea, 0x46, 0x73,
0xae, 0x55, 0xdb, 0xae, 0x4f, 0x06, 0xa1, 0x8b, 0xc8, 0x96, 0x6a, 0xdc, 0xe9, 0x20, 0xa2, 0x28,
0x4c, 0x57, 0x6c, 0xf1, 0x6d, 0x3d, 0x85, 0x86, 0x54, 0xee, 0x20, 0xdb, 0x49, 0x73, 0xf2, 0xb8,
0x6a, 0x98, 0xce, 0xe9, 0x52, 0x41, 0x4e, 0x5b, 0x97, 0xe1, 0x52, 0xb1, 0x65, 0x05, 0xcb, 0x0f,
0x06, 0x5c, 0xd8, 0xf1, 0x3c, 0x8a, 0x49, 0x82, 0xde, 0x63, 0xde, 0xbb, 0x1f, 0xf0, 0x58, 0x49,
0x4b, 0x23, 0x20, 0xa9, 0x21, 0x6d, 0xd5, 0xd7, 0x53, 0x15, 0x85, 0x0a, 0xd9, 0x83, 0xb5, 0x84,
0x45, 0xd4, 0x1d, 0xa0, 0xc3, 0x07, 0x83, 0xe3, 0x4a, 0x6b, 0xaa, 0x23, 0x9c, 0x6f, 0x8b, 0x69,
0xf1, 0x28, 0xf2, 0x50, 0x5d, 0x63, 0x13, 0xa5, 0x9e, 0x91, 0x59, 0xbf, 0x94, 0x60, 0x5d, 0x15,
0xd6, 0x13, 0xea, 0x8f, 0x19, 0x7e, 0x3c, 0xf4, 0x38, 0x47, 0x99, 0x2c, 0x59, 0xd2, 0x39, 0xc1,
0x41, 0xe1, 0x25, 0xae, 0xc2, 0x16, 0xdf, 0xa4, 0x0e, 0x8b, 0xaa, 0x72, 0x45, 0x85, 0xcf, 0xd9,
0x7a, 0x49, 0x6e, 0x03, 0xa4, 0x15, 0xaa, 0x26, 0xc1, 0xb1, 0xa5, 0x99, 0x51, 0x27, 0xb7, 0xc1,
0x0c, 0xdc, 0x23, 0x5d, 0x89, 0xe8, 0x15, 0xcd, 0x81, 0x8d, 0xc0, 0x3d, 0xba, 0xa3, 0x15, 0xb2,
0x3d, 0x62, 0x1f, 0x00, 0x8f, 0x62, 0x9f, 0xba, 0x1c, 0x77, 0x55, 0xb7, 0xa7, 0xeb, 0x93, 0x99,
0x73, 0xd6, 0xcf, 0x06, 0x6c, 0xe4, 0x01, 0x92, 0x04, 0x72, 0x84, 0xee, 0xc3, 0xaa, 0xab, 0x29,
0x74, 0x04, 0x29, 0x3a, 0x19, 0x37, 0xd3, 0x64, 0x2c, 0x20, 0xd9, 0x5e, 0x19, 0x1f, 0x13, 0xeb,
0x84, 0xdc, 0x84, 0x73, 0x34, 0x8a, 0x98, 0x13, 0xfb, 0xd8, 0xc3, 0x71, 0x4e, 0xed, 0xae, 0x70,
0x97, 0xfe, 0x7c, 0xb5, 0xb5, 0x78, 0xc8, 0xe5, 0x07, 0xfb, 0x76, 0x8d, 0x6b, 0xc9, 0x85, 0x67,
0xfd, 0x9e, 0xba, 0xb6, 0x17, 0x05, 0xdc, 0xee, 0x59, 0x93, 0xf7, 0x01, 0x2c, 0x2a, 0xa6, 0x14,
0x73, 0x24, 0xc3, 0xdc, 0xa1, 0xfc, 0xb2, 0xb5, 0x0a, 0xb9, 0x0d, 0x2b, 0x11, 0xf5, 0x07, 0x7e,
0xe8, 0x0e, 0x35, 0x1a, 0x0b, 0x02, 0x8d, 0xa2, 0x24, 0x5e, 0xd6, 0xaa, 0x12, 0x01, 0xeb, 0x3e,
0xd4, 0x27, 0x62, 0x49, 0x71, 0xce, 0xb8, 0x61, 0x9c, 0xe8, 0x86, 0xe5, 0xc2, 0x45, 0x65, 0x69,
0x3f, 0xfa, 0x3e, 0x1c, 0x46, 0xae, 0x77, 0xd6, 0xb8, 0x58, 0x3f, 0x19, 0x60, 0x4e, 0xdd, 0xf1,
0x36, 0xf2, 0x22, 0x13, 0x79, 0xe9, 0xe4, 0xc8, 0xbf, 0x82, 0x77, 0x95, 0x57, 0x07, 0x61, 0x3f,
0x3a, 0xf3, 0xa8, 0xef, 0x8e, 0x5b, 0x85, 0x34, 0x5f, 0x48, 0xd0, 0x29, 0xdc, 0x74, 0xc6, 0x69,
0x9b, 0x9b, 0x2a, 0x67, 0xe7, 0xa8, 0x37, 0xce, 0xa5, 0xfc, 0x2c, 0x3a, 0x53, 0x6e, 0xac, 0xbf,
0x0c, 0x58, 0xe7, 0x33, 0x46, 0x5d, 0x95, 0x9c, 0x22, 0x8c, 0x75, 0x28, 0xc7, 0x14, 0xfb, 0xfe,
0x91, 0x0a, 0x44, 0xad, 0xc8, 0x16, 0xd4, 0x12, 0xe6, 0x52, 0xe6, 0xb8, 0x7d, 0x8e, 0xa1, 0x7c,
0x24, 0x81, 0x10, 0xed, 0x70, 0x09, 0xd9, 0x04, 0xc0, 0xd0, 0x73, 0xba, 0xd8, 0xe7, 0x13, 0x6c,
0x5e, 0xec, 0x57, 0x31, 0xf4, 0x76, 0x85, 0x80, 0x5c, 0x82, 0x2a, 0x45, 0x3e, 0x42, 0xfd, 0xef,
0x64, 0x5b, 0xac, 0xd8, 0xa9, 0x20, 0x1d, 0xaa, 0xe5, 0xcc, 0x50, 0xe5, 0x26, 0x79, 0xbc, 0x4e,
0x7f, 0xe8, 0x0e, 0xe4, 0xfb, 0x75, 0xd1, 0xae, 0x72, 0xc9, 0x5d, 0x2e, 0xb0, 0xfe, 0x30, 0x60,
0x23, 0x1f, 0x5d, 0x8a, 0xe1, 0x47, 0xf9, 0xc9, 0x7b, 0x3d, 0x05, 0x6e, 0xc6, 0x89, 0xf6, 0x09,
0x73, 0xd8, 0x44, 0x98, 0xd7, 0xef, 0x5d, 0xc1, 0xb3, 0x91, 0xe1, 0xf9, 0xb5, 0x92, 0x8b, 0x34,
0xa0, 0xea, 0x27, 0x8e, 0x42, 0x79, 0x4e, 0x5c, 0x51, 0xf1, 0x93, 0x43, 0xb1, 0xb6, 0x9e, 0xf1,
0xc4, 0x28, 0x18, 0xf4, 0x3c, 0xa8, 0x2d, 0xa8, 0x49, 0x96, 0x9c, 0xcc, 0xc8, 0x07, 0x29, 0x7a,
0xc4, 0x07, 0xff, 0x26, 0x40, 0xec, 0x52, 0x16, 0x22, 0x4d, 0x87, 0x7e, 0x55, 0x49, 0x0e, 0x3c,
0xab, 0xc1, 0xdb, 0x4e, 0xd1, 0xa8, 0x7f, 0x3c, 0xf4, 0xac, 0x35, 0x20, 0x87, 0x34, 0xfa, 0x1a,
0x7b, 0xd9, 0xca, 0xb4, 0x6e, 0xc1, 0x85, 0x9c, 0x54, 0x3d, 0x6c, 0xae, 0xc0, 0x52, 0x2c, 0xc5,
0x4e, 0xe2, 0x0e, 0x75, 0x0e, 0xd5, 0x94, 0xac, 0xe3, 0x0e, 0xd9, 0xf6, 0x3f, 0x15, 0xa8, 0x3c,
0x54, 0xa0, 0x93, 0x87, 0xb0, 0x24, 0xdf, 0x91, 0xea, 0x8f, 0x6f, 0x73, 0xf2, 0x25, 0x94, 0x7b,
0xe2, 0x9b, 0x97, 0x67, 0x6d, 0xab, 0xeb, 0xf7, 0xa1, 0x7a, 0x0f, 0x99, 0xb2, 0x65, 0x4e, 0x2a,
0xa7, 0xef, 0x4d, 0xb3, 0x51, 0xb8, 0xa7, 0xac, 0x3c, 0x84, 0x25, 0x59, 0x7c, 0xb3, 0x9c, 0xca,
0xd5, 0xfe, 0xb4, 0x53, 0xf9, 0xca, 0x25, 0xf7, 0xa1, 0xc6, 0x73, 0x4b, 0xee, 0x25, 0xa4, 0x51,
0xf4, 0xd8, 0xd3, 0xb6, 0x2e, 0x15, 0x6f, 0x2a, 0x4b, 0x08, 0x6b, 0x1d, 0x1d, 0x5e, 0x86, 0x2d,
0x72, 0x6d, 0xf2, 0x54, 0x61, 0xa6, 0x98, 0xd7, 0x4f, 0x52, 0x53, 0xd7, 0x3c, 0x81, 0x55, 0x89,
0xab, 0x2a, 0x07, 0x9e, 0x62, 0xcd, 0xf4, 0x6c, 0xf1, 0x9b, 0xcb, 0xbc, 0x32, 0x4b, 0x23, 0x2d,
0xbe, 0x2f, 0x60, 0x55, 0x4e, 0xc8, 0x8c, 0xe1, 0xe9, 0x63, 0x93, 0x0f, 0x02, 0xd3, 0x9a, 0xa9,
0x92, 0x9a, 0xee, 0xc0, 0x72, 0xa6, 0xc1, 0x8b, 0xa2, 0x98, 0x3a, 0x95, 0x9f, 0x2c, 0x66, 0x73,
0x86, 0x42, 0x6a, 0xd4, 0x01, 0xa2, 0x67, 0x64, 0xc6, 0xe3, 0xab, 0x53, 0xe7, 0xa6, 0x87, 0xb5,
0xf9, 0xde, 0x31, 0x4a, 0x39, 0x40, 0x64, 0xb2, 0x1c, 0x0b, 0xc8, 0xe4, 0xa8, 0x29, 0x00, 0x64,
0x7a, 0x58, 0x7c, 0x0e, 0x2b, 0xd9, 0x8e, 0x36, 0xc1, 0x61, 0x71, 0xf3, 0xcf, 0x72, 0x38, 0xab,
0x81, 0x7e, 0x09, 0xe7, 0xf3, 0x69, 0xc3, 0x85, 0x39, 0x87, 0x8a, 0x9b, 0x94, 0x79, 0x75, 0xb6,
0x4e, 0x6a, 0xfd, 0x53, 0xa8, 0x65, 0xda, 0x0a, 0xc9, 0x94, 0xc3, 0x74, 0x0f, 0x32, 0x37, 0x67,
0xec, 0x4a, 0x73, 0xbb, 0xf3, 0xcf, 0x4a, 0x71, 0xb7, 0x5b, 0x16, 0xcf, 0xe5, 0x9b, 0xff, 0x05,
0x00, 0x00, 0xff, 0xff, 0xe0, 0xfa, 0x95, 0x5f, 0xdb, 0x12, 0x00, 0x00,
// 1444 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6,
0x16, 0xbe, 0x94, 0x6d, 0x59, 0x3a, 0x72, 0x6c, 0x67, 0xe2, 0x6b, 0x2b, 0x54, 0x1c, 0x2b, 0xcc,
0x4d, 0xa0, 0x0b, 0x5c, 0x28, 0x17, 0xce, 0x26, 0x40, 0x5a, 0xa0, 0x7e, 0xe4, 0xe1, 0x36, 0x0f,
0x83, 0x2a, 0x9a, 0x34, 0x68, 0x41, 0x50, 0xe2, 0x91, 0xc2, 0x46, 0x7c, 0x74, 0x38, 0x4a, 0xed,
0xac, 0xfb, 0x03, 0xfa, 0x0b, 0xf2, 0x27, 0xfa, 0x27, 0x8a, 0x2e, 0xba, 0xe8, 0xb2, 0x05, 0xd2,
0x5d, 0xff, 0x44, 0x37, 0xc5, 0xbc, 0x44, 0x52, 0xa2, 0xec, 0x24, 0x70, 0xd0, 0x1d, 0xe7, 0xcc,
0x99, 0x33, 0x73, 0xbe, 0xef, 0xbc, 0x08, 0xcb, 0x01, 0x32, 0xd7, 0x0f, 0xfb, 0x51, 0x3b, 0xa6,
0x11, 0x8b, 0x48, 0x45, 0xaf, 0xcd, 0x55, 0x0c, 0x7b, 0xf4, 0x38, 0x66, 0x7e, 0x14, 0xca, 0x3d,
0x13, 0x06, 0xd1, 0x40, 0xe9, 0x99, 0x5b, 0x83, 0x28, 0x1a, 0x0c, 0xf1, 0x86, 0x58, 0x75, 0x47,
0xfd, 0x1b, 0xcc, 0x0f, 0x30, 0x61, 0x6e, 0x10, 0x6b, 0xe5, 0x30, 0xf2, 0x50, 0x7d, 0xaf, 0xc4,
0x91, 0x1f, 0x32, 0xa4, 0x5e, 0x57, 0x09, 0x96, 0x22, 0xea, 0x21, 0x4d, 0xe4, 0xca, 0xfa, 0x71,
0x0e, 0xca, 0xbb, 0xa3, 0xde, 0x0b, 0x64, 0x84, 0xc0, 0x7c, 0xe8, 0x06, 0x58, 0x37, 0x9a, 0x46,
0x6b, 0xc9, 0x16, 0xdf, 0xe4, 0x16, 0xd4, 0x62, 0x97, 0x3d, 0x77, 0x7a, 0x7e, 0xfc, 0x1c, 0x69,
0xbd, 0xd4, 0x34, 0x5a, 0xcb, 0xdb, 0x1b, 0xed, 0xcc, 0xf3, 0xf6, 0xc4, 0x4e, 0x67, 0xe4, 0x33,
0xb4, 0x81, 0xeb, 0x4a, 0x01, 0xb9, 0x06, 0xcb, 0x2e, 0x63, 0xd4, 0xef, 0x8e, 0xb8, 0x9a, 0xe3,
0x7b, 0xf5, 0x39, 0x61, 0xf7, 0x5c, 0x46, 0x7a, 0xe0, 0x91, 0x3d, 0x80, 0x1e, 0x45, 0x97, 0xa1,
0xe7, 0xb8, 0xac, 0x3e, 0xdf, 0x34, 0x5a, 0xb5, 0x6d, 0xb3, 0x2d, 0x1d, 0x6c, 0x6b, 0x07, 0xdb,
0x9f, 0x6b, 0x07, 0x77, 0x2b, 0x3f, 0xbd, 0xd9, 0xfa, 0xd7, 0x0f, 0x7f, 0x6c, 0x19, 0x76, 0x55,
0x9d, 0xdb, 0x61, 0xe4, 0xff, 0xb0, 0xe6, 0x61, 0xdf, 0x1d, 0x0d, 0x99, 0x93, 0xe0, 0x20, 0xc0,
0x90, 0x39, 0x89, 0xff, 0x0a, 0xeb, 0x0b, 0x4d, 0xa3, 0x35, 0x67, 0x13, 0xb5, 0xd7, 0x91, 0x5b,
0x1d, 0xff, 0x15, 0x92, 0x27, 0x70, 0x51, 0x9f, 0xa0, 0xe8, 0x8d, 0x42, 0xcf, 0x0d, 0x7b, 0xc7,
0x4e, 0xd2, 0x7b, 0x8e, 0x01, 0xd6, 0xcb, 0xe2, 0x15, 0x8d, 0x76, 0x8a, 0x9c, 0x3d, 0xd6, 0xe9,
0x08, 0x15, 0x7b, 0x43, 0x9d, 0x9e, 0xdc, 0x20, 0x1e, 0x6c, 0x6a, 0xc3, 0x29, 0x48, 0x4e, 0xec,
0x52, 0x37, 0x40, 0x86, 0x34, 0xa9, 0x2f, 0x0a, 0xe3, 0xcd, 0x2c, 0x84, 0x77, 0xc6, 0x9f, 0x87,
0x63, 0x3d, 0xbb, 0xa1, 0xcc, 0x14, 0x6d, 0x5a, 0x3e, 0x2c, 0x4b, 0xd2, 0x1e, 0xf8, 0x09, 0x3b,
0x60, 0x18, 0x14, 0x92, 0x97, 0xc7, 0xb6, 0xf4, 0x5e, 0xd8, 0x5a, 0x7f, 0x95, 0xe0, 0x82, 0xbc,
0x6b, 0x4f, 0xc8, 0x6c, 0xfc, 0x76, 0x84, 0xc9, 0x3f, 0x14, 0x2d, 0xb3, 0x88, 0x9e, 0x7f, 0x3f,
0xa2, 0x17, 0x3e, 0x24, 0xd1, 0xe5, 0xb3, 0x20, 0xfa, 0x13, 0x58, 0xcb, 0x83, 0x9f, 0xc4, 0x51,
0x98, 0x20, 0x69, 0x41, 0xb9, 0x2b, 0xe4, 0x02, 0xff, 0xda, 0xf6, 0x6a, 0x7b, 0x5c, 0x4b, 0xa4,
0xbe, 0xad, 0xf6, 0xad, 0xeb, 0xb0, 0x2a, 0x25, 0xf7, 0x90, 0x9d, 0xc0, 0x9d, 0xf5, 0x31, 0x9c,
0xcf, 0xe8, 0xbd, 0xf3, 0x35, 0xff, 0xd5, 0x51, 0xb2, 0x8f, 0x43, 0x3c, 0x31, 0x4a, 0xac, 0x75,
0xed, 0x93, 0x56, 0x95, 0x97, 0x59, 0x3b, 0xfa, 0x05, 0x3c, 0xa8, 0xb5, 0x81, 0x75, 0x28, 0xf7,
0x46, 0x34, 0x89, 0xa8, 0x32, 0xa1, 0x56, 0x64, 0x0d, 0x16, 0x86, 0x7e, 0xe0, 0xcb, 0xb0, 0x5e,
0xb0, 0xe5, 0xc2, 0x7a, 0x0a, 0x24, 0x6b, 0x42, 0x79, 0xd1, 0x86, 0x05, 0x9f, 0x61, 0x90, 0xd4,
0x8d, 0xe6, 0x5c, 0xab, 0xb6, 0x5d, 0x9f, 0x74, 0x42, 0x27, 0x91, 0x2d, 0xd5, 0xf8, 0xa3, 0x83,
0x88, 0xa2, 0x30, 0x5d, 0xb1, 0xc5, 0xb7, 0xf5, 0x14, 0x1a, 0x52, 0xb9, 0x83, 0x6c, 0x27, 0x8d,
0xc9, 0x93, 0xb2, 0x61, 0x3a, 0xa6, 0x4b, 0x05, 0x31, 0x6d, 0x5d, 0x86, 0x4b, 0xc5, 0x96, 0x15,
0x2c, 0xdf, 0x1b, 0x70, 0x61, 0xc7, 0xf3, 0x28, 0x26, 0x09, 0x7a, 0x8f, 0x79, 0xed, 0x7e, 0xc0,
0x7d, 0x25, 0x2d, 0x8d, 0x80, 0xa4, 0x86, 0xb4, 0x55, 0x5d, 0x4f, 0x55, 0x14, 0x2a, 0x64, 0x0f,
0xd6, 0x12, 0x16, 0x51, 0x77, 0x80, 0x0e, 0x6f, 0x0c, 0x8e, 0x2b, 0xad, 0xa9, 0x8a, 0x70, 0xbe,
0x2d, 0xba, 0xc5, 0xa3, 0xc8, 0x43, 0x75, 0x8d, 0x4d, 0x94, 0x7a, 0x46, 0x66, 0xbd, 0x2e, 0xc1,
0xba, 0x4a, 0xac, 0x27, 0xd4, 0x1f, 0x33, 0xfc, 0x78, 0xe8, 0x71, 0x8e, 0x32, 0x51, 0xb2, 0xa4,
0x63, 0x82, 0x83, 0xc2, 0x53, 0x5c, 0xb9, 0x2d, 0xbe, 0x49, 0x1d, 0x16, 0x55, 0xe6, 0x8a, 0x0c,
0x9f, 0xb3, 0xf5, 0x92, 0xdc, 0x06, 0x48, 0x33, 0x54, 0x75, 0x82, 0x13, 0x53, 0x33, 0xa3, 0x4e,
0x6e, 0x83, 0x19, 0xb8, 0x47, 0x3a, 0x13, 0xd1, 0x2b, 0xea, 0x03, 0x1b, 0x81, 0x7b, 0x74, 0x47,
0x2b, 0x64, 0x6b, 0xc4, 0x3e, 0x00, 0x1e, 0xc5, 0x3e, 0x75, 0x39, 0xee, 0x2a, 0x6f, 0xdf, 0xae,
0x4e, 0x66, 0xce, 0x59, 0xbf, 0x1a, 0xb0, 0x91, 0x07, 0x48, 0x12, 0xc8, 0x11, 0xba, 0x0f, 0xab,
0xae, 0xa6, 0xd0, 0x11, 0xa4, 0xe8, 0x60, 0xdc, 0x4c, 0x83, 0xb1, 0x80, 0x64, 0x7b, 0x65, 0x7c,
0x4c, 0xac, 0x13, 0x72, 0x13, 0xce, 0xd1, 0x28, 0x62, 0x4e, 0xec, 0x63, 0x0f, 0xc7, 0x31, 0xb5,
0xbb, 0xc2, 0x9f, 0xf4, 0xdb, 0x9b, 0xad, 0xc5, 0x43, 0x2e, 0x3f, 0xd8, 0xb7, 0x6b, 0x5c, 0x4b,
0x2e, 0x3c, 0x51, 0x97, 0xa9, 0xff, 0xd2, 0x65, 0xe8, 0xbc, 0xc0, 0x63, 0x59, 0x5a, 0x77, 0x37,
0xd4, 0x91, 0x15, 0xa1, 0x75, 0x28, 0xf7, 0x3f, 0xc3, 0x63, 0x1b, 0xe2, 0xf1, 0xb7, 0xf5, 0x73,
0xea, 0xd4, 0x5e, 0x14, 0xf0, 0x17, 0x9d, 0x35, 0xed, 0xff, 0x83, 0x45, 0xc5, 0xb1, 0xe2, 0x9c,
0x64, 0x38, 0x3f, 0x94, 0x5f, 0xb6, 0x56, 0x21, 0xb7, 0x61, 0x25, 0xa2, 0xfe, 0xc0, 0x0f, 0xdd,
0xa1, 0xc6, 0x71, 0x41, 0xe0, 0x58, 0x14, 0xfe, 0xcb, 0x5a, 0x55, 0x62, 0x67, 0xdd, 0x87, 0xfa,
0x84, 0x2f, 0x29, 0x43, 0x99, 0x67, 0x18, 0xa7, 0x3e, 0xc3, 0x72, 0xe1, 0xa2, 0xb2, 0xb4, 0x1f,
0x7d, 0x17, 0x0e, 0x23, 0xd7, 0x3b, 0x6b, 0x5c, 0xac, 0x5f, 0x0c, 0x30, 0xa7, 0xee, 0xf8, 0x10,
0x11, 0x95, 0xf1, 0xbc, 0x74, 0x3a, 0x01, 0xef, 0x1f, 0x4a, 0x5f, 0xc3, 0xbf, 0x95, 0x3f, 0x07,
0x61, 0x3f, 0x3a, 0x73, 0xbc, 0xee, 0x8e, 0xcb, 0x93, 0x34, 0x5f, 0x48, 0xed, 0xe9, 0x0e, 0x5a,
0xce, 0x38, 0xe0, 0x73, 0x9d, 0xec, 0xec, 0x1e, 0xfa, 0xda, 0x18, 0x87, 0x61, 0xbe, 0x01, 0x9e,
0x2d, 0xad, 0x13, 0x44, 0x95, 0xde, 0x9e, 0xa8, 0xdf, 0x0d, 0x58, 0xe7, 0x2d, 0x51, 0x3d, 0x32,
0x79, 0x0b, 0x04, 0xd6, 0xa1, 0x1c, 0x53, 0xec, 0xfb, 0x47, 0x0a, 0x03, 0xb5, 0x22, 0x5b, 0x50,
0x4b, 0x98, 0x4b, 0x99, 0xe3, 0xf6, 0x39, 0xfc, 0x72, 0xa6, 0x03, 0x21, 0xda, 0xe1, 0x12, 0xb2,
0x09, 0x80, 0xa1, 0xe7, 0x74, 0xb1, 0xcf, 0x1b, 0xee, 0xbc, 0xd8, 0xaf, 0x62, 0xe8, 0xed, 0x0a,
0x01, 0xb9, 0x04, 0x55, 0x8a, 0xbc, 0xe3, 0xfb, 0x2f, 0x65, 0x15, 0xaf, 0xd8, 0xa9, 0x20, 0x9d,
0x01, 0xca, 0x99, 0x19, 0x80, 0x9b, 0xe4, 0x48, 0x39, 0xfd, 0xa1, 0x3b, 0x90, 0xe3, 0xf6, 0xa2,
0x5d, 0xe5, 0x92, 0xbb, 0x5c, 0x20, 0xca, 0x74, 0xde, 0xbb, 0x14, 0xfd, 0x8f, 0xf2, 0x83, 0xc2,
0xf5, 0x14, 0xf2, 0x19, 0x27, 0xda, 0xa7, 0x8c, 0x0d, 0x26, 0xc2, 0xbc, 0x1e, 0xcf, 0x45, 0x88,
0x18, 0x99, 0x10, 0x79, 0xb7, 0xc4, 0x6b, 0x40, 0xd5, 0x4f, 0x1c, 0x85, 0xf2, 0x9c, 0xb8, 0xa2,
0xe2, 0x27, 0x87, 0x62, 0x6d, 0x3d, 0xe3, 0x21, 0x55, 0x30, 0x97, 0x70, 0xa7, 0xb6, 0xa0, 0x26,
0x59, 0x72, 0x32, 0x13, 0x0a, 0x48, 0xd1, 0x23, 0x3e, 0xa7, 0x6c, 0x02, 0xc4, 0x2e, 0x65, 0x21,
0xd2, 0x74, 0x46, 0xa9, 0x2a, 0xc9, 0x81, 0x67, 0x35, 0x78, 0xad, 0x2b, 0x9a, 0x4c, 0x1e, 0x0f,
0x3d, 0x6b, 0x0d, 0xc8, 0x21, 0x8d, 0xbe, 0xc1, 0x5e, 0x36, 0xa9, 0xad, 0x5b, 0x70, 0x21, 0x27,
0x55, 0x73, 0xd8, 0x15, 0x58, 0x8a, 0xa5, 0xd8, 0x49, 0xdc, 0xa1, 0x8e, 0xa1, 0x9a, 0x92, 0x75,
0xdc, 0x21, 0xdb, 0xfe, 0xb3, 0x02, 0x95, 0x87, 0x0a, 0x74, 0xf2, 0x10, 0x96, 0xe4, 0xd8, 0xab,
0x7e, 0x50, 0x37, 0x27, 0x07, 0xb7, 0xdc, 0x1f, 0x89, 0x79, 0x79, 0xd6, 0xb6, 0xba, 0x7e, 0x1f,
0xaa, 0xf7, 0x90, 0x29, 0x5b, 0xe6, 0xa4, 0x72, 0x3a, 0x1e, 0x9b, 0x8d, 0xc2, 0x3d, 0x65, 0xe5,
0x21, 0x2c, 0xc9, 0xb4, 0x9d, 0xf5, 0xa8, 0x5c, 0xd9, 0x98, 0x7e, 0x54, 0x3e, 0xe7, 0xc9, 0x7d,
0xa8, 0xf1, 0xd8, 0x92, 0x7b, 0x09, 0x69, 0x14, 0xcd, 0xa6, 0xda, 0xd6, 0xa5, 0xe2, 0x4d, 0x65,
0x09, 0x61, 0xad, 0xa3, 0xdd, 0xcb, 0xb0, 0x45, 0xae, 0x4d, 0x9e, 0x2a, 0x8c, 0x14, 0xf3, 0xfa,
0x69, 0x6a, 0xea, 0x9a, 0x27, 0xb0, 0x2a, 0x71, 0x55, 0xe9, 0xc0, 0x43, 0xac, 0x99, 0x9e, 0x2d,
0x1e, 0x11, 0xcd, 0x2b, 0xb3, 0x34, 0xd2, 0xe4, 0xfb, 0x12, 0x56, 0x65, 0x5b, 0xce, 0x18, 0x9e,
0x3e, 0x36, 0x39, 0x85, 0x98, 0xd6, 0x4c, 0x95, 0xd4, 0x74, 0x07, 0x96, 0x33, 0xbd, 0x41, 0x24,
0xc5, 0xd4, 0xa9, 0x7c, 0x53, 0x32, 0x9b, 0x33, 0x14, 0x52, 0xa3, 0x0e, 0x10, 0xdd, 0x98, 0x33,
0x2f, 0xbe, 0x3a, 0x75, 0x6e, 0x7a, 0x42, 0x30, 0xff, 0x73, 0x82, 0x52, 0x0e, 0x10, 0x19, 0x2c,
0x27, 0x02, 0x32, 0xd9, 0xa5, 0x0a, 0x00, 0x99, 0x6e, 0x33, 0x5f, 0xc0, 0x4a, 0xb6, 0xa2, 0x4d,
0x70, 0x58, 0x5c, 0xfc, 0xb3, 0x1c, 0xce, 0x2a, 0xa0, 0x5f, 0xc1, 0xf9, 0x7c, 0xd8, 0x70, 0x61,
0xee, 0x41, 0xc5, 0x45, 0xca, 0xbc, 0x3a, 0x5b, 0x27, 0xb5, 0xfe, 0x29, 0xd4, 0x32, 0x65, 0x85,
0x64, 0xd2, 0x61, 0xba, 0x06, 0x99, 0x9b, 0x33, 0x76, 0xa5, 0xb9, 0xdd, 0xf9, 0x67, 0xa5, 0xb8,
0xdb, 0x2d, 0x8b, 0xe9, 0xfe, 0xe6, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xcd, 0xfb, 0xa6,
0x8a, 0x13, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -114,6 +114,7 @@ message SegmentWriteRequestOld {
message SegmentWriteResponseOld {
repeated AddressedOrderLimit addressed_limits = 1;
bytes root_piece_id = 2 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false];
}
message SegmentCommitRequestOld {
@ -137,6 +138,7 @@ message SegmentDownloadRequestOld {
message SegmentDownloadResponseOld {
repeated AddressedOrderLimit addressed_limits = 1;
pointerdb.Pointer pointer = 2;
bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false];
}
message SegmentInfoRequestOld {
@ -157,6 +159,7 @@ message SegmentDeleteRequestOld {
message SegmentDeleteResponseOld {
repeated AddressedOrderLimit addressed_limits = 1;
bytes private_key = 2 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false];
}
message ListSegmentsRequestOld {

View File

@ -102,8 +102,10 @@ type OrderLimit struct {
// satellite who issued this order limit allowing orderer to do the specified action
SatelliteId NodeID `protobuf:"bytes,2,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"`
// uplink who requested or whom behalf the order limit to do an action
UplinkId NodeID `protobuf:"bytes,3,opt,name=uplink_id,json=uplinkId,proto3,customtype=NodeID" json:"uplink_id"`
// storage node who can reclaim the order limit specified by serial
DeprecatedUplinkId *NodeID `protobuf:"bytes,3,opt,name=deprecated_uplink_id,json=deprecatedUplinkId,proto3,customtype=NodeID" json:"deprecated_uplink_id,omitempty"`
// public key that will be used to sign orders and piece hash
UplinkPublicKey PiecePublicKey `protobuf:"bytes,13,opt,name=uplink_public_key,json=uplinkPublicKey,proto3,customtype=PiecePublicKey" json:"uplink_public_key"`
// storage node who can re claimthe order limit specified by serial
StorageNodeId NodeID `protobuf:"bytes,4,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"`
// piece which is allowed to be touched
PieceId PieceID `protobuf:"bytes,5,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"`
@ -251,15 +253,15 @@ type PieceHash struct {
PieceId PieceID `protobuf:"bytes,1,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"`
// hash of the piece that was/is uploaded
Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
// signature either satellite or storage node
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
// size of uploaded piece
PieceSize int64 `protobuf:"varint,4,opt,name=piece_size,json=pieceSize,proto3" json:"piece_size,omitempty"`
// timestamp when upload occur
Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
// timestamp when upload occurred
Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
// signature either satellite or storage node
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PieceHash) Reset() { *m = PieceHash{} }
@ -293,13 +295,6 @@ func (m *PieceHash) GetHash() []byte {
return nil
}
func (m *PieceHash) GetSignature() []byte {
if m != nil {
return m.Signature
}
return nil
}
func (m *PieceHash) GetPieceSize() int64 {
if m != nil {
return m.PieceSize
@ -314,6 +309,13 @@ func (m *PieceHash) GetTimestamp() time.Time {
return time.Time{}
}
func (m *PieceHash) GetSignature() []byte {
if m != nil {
return m.Signature
}
return nil
}
type SettlementRequest struct {
Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"`
Order *Order `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"`
@ -412,53 +414,56 @@ func init() {
func init() { proto.RegisterFile("orders.proto", fileDescriptor_e0f5d4cf0fc9e41b) }
var fileDescriptor_e0f5d4cf0fc9e41b = []byte{
// 731 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcb, 0x6e, 0x13, 0x4b,
0x10, 0x4d, 0xfb, 0x31, 0xb6, 0xcb, 0xaf, 0x49, 0x27, 0xba, 0xf2, 0xb5, 0xee, 0x95, 0x8d, 0xd9,
0x98, 0x44, 0x72, 0x88, 0x91, 0x90, 0xb2, 0x41, 0xf2, 0x63, 0x14, 0x06, 0xa2, 0xc4, 0x6a, 0x3b,
0x2c, 0xd8, 0x58, 0xe3, 0x4c, 0xe3, 0x8c, 0xb0, 0x67, 0xcc, 0x74, 0x5b, 0x42, 0xd9, 0xb3, 0xe7,
0x4b, 0xf8, 0x03, 0xf6, 0x2c, 0xf8, 0x02, 0x16, 0xe1, 0x57, 0x50, 0x57, 0x8f, 0x3d, 0x0e, 0x04,
0x41, 0x94, 0xdd, 0x9c, 0xaa, 0x73, 0xaa, 0xba, 0xba, 0x4e, 0x0f, 0x14, 0x82, 0xd0, 0xe5, 0xa1,
0x68, 0x2d, 0xc2, 0x40, 0x06, 0xd4, 0xd0, 0xa8, 0x0a, 0xd3, 0x60, 0x1a, 0xe8, 0x58, 0xb5, 0x36,
0x0d, 0x82, 0xe9, 0x8c, 0x1f, 0x20, 0x9a, 0x2c, 0xdf, 0x1c, 0x48, 0x6f, 0xce, 0x85, 0x74, 0xe6,
0x8b, 0x88, 0x00, 0x7e, 0xe0, 0x72, 0xfd, 0xdd, 0xf8, 0x94, 0x06, 0x38, 0x53, 0x35, 0x4e, 0xbc,
0xb9, 0x27, 0xe9, 0x11, 0x14, 0x05, 0x0f, 0x3d, 0x67, 0x36, 0xf6, 0x97, 0xf3, 0x09, 0x0f, 0x2b,
0xa4, 0x4e, 0x9a, 0x85, 0xee, 0xee, 0x97, 0xeb, 0xda, 0xd6, 0xb7, 0xeb, 0x5a, 0x61, 0x88, 0xc9,
0x53, 0xcc, 0xb1, 0x82, 0xd8, 0x40, 0xf4, 0x10, 0x0a, 0xc2, 0x91, 0x7c, 0x36, 0xf3, 0x24, 0x1f,
0x7b, 0x6e, 0x25, 0x81, 0xca, 0x52, 0xa4, 0x34, 0x4e, 0x03, 0x97, 0xdb, 0x7d, 0x96, 0x5f, 0x73,
0x6c, 0x97, 0xee, 0x43, 0x6e, 0xb9, 0x98, 0x79, 0xfe, 0x5b, 0xc5, 0x4f, 0xde, 0xca, 0xcf, 0x6a,
0x82, 0xed, 0xd2, 0xa7, 0x50, 0x16, 0x32, 0x08, 0x9d, 0x29, 0x1f, 0xab, 0xf3, 0x2b, 0x49, 0xea,
0x56, 0x49, 0x31, 0xa2, 0x21, 0x74, 0xe9, 0x1e, 0x64, 0x17, 0x1e, 0xbf, 0x40, 0x41, 0x1a, 0x05,
0xe5, 0x48, 0x90, 0x19, 0xa8, 0xb8, 0xdd, 0x67, 0x19, 0x24, 0xd8, 0x2e, 0xdd, 0x85, 0xf4, 0x4c,
0xdd, 0x43, 0xc5, 0xa8, 0x93, 0x66, 0x92, 0x69, 0x40, 0xf7, 0xc1, 0x70, 0x2e, 0xa4, 0x17, 0xf8,
0x95, 0x4c, 0x9d, 0x34, 0x4b, 0xed, 0x9d, 0x56, 0xb4, 0x03, 0xd4, 0x77, 0x30, 0xc5, 0x22, 0x0a,
0x3d, 0x03, 0x53, 0xb7, 0xe3, 0xef, 0x17, 0x5e, 0xe8, 0xa0, 0x2c, 0x5b, 0x27, 0xcd, 0x7c, 0xbb,
0xda, 0xd2, 0x8b, 0x69, 0xad, 0x16, 0xd3, 0x1a, 0xad, 0x16, 0xd3, 0xcd, 0xaa, 0x23, 0x7d, 0xfc,
0x5e, 0x23, 0xac, 0x8c, 0x6a, 0x6b, 0x2d, 0x56, 0x05, 0xb1, 0xdd, 0x66, 0xc1, 0xdc, 0x5d, 0x0a,
0xa2, 0x7a, 0xa3, 0xe0, 0x4b, 0x28, 0xe9, 0x82, 0x17, 0x21, 0xd7, 0xe5, 0x0a, 0x77, 0x28, 0x57,
0x44, 0x6d, 0x2f, 0x92, 0xd2, 0x03, 0xd8, 0x89, 0xb7, 0x2e, 0xbc, 0xa9, 0xef, 0xc8, 0x65, 0xc8,
0x2b, 0xa0, 0x2e, 0x9a, 0xd1, 0x75, 0x6a, 0xb8, 0xca, 0xd0, 0x67, 0xb0, 0x1d, 0x0b, 0x1c, 0xd7,
0x0d, 0xb9, 0x10, 0x95, 0x3c, 0x1e, 0x60, 0xbb, 0x85, 0xc6, 0x54, 0x7b, 0xeb, 0xe8, 0x04, 0x33,
0xd7, 0xdc, 0x28, 0xd2, 0xf8, 0x40, 0x20, 0x8d, 0x86, 0xbd, 0x8f, 0x57, 0xff, 0x01, 0xc3, 0x99,
0x07, 0x4b, 0x5f, 0xa2, 0x4b, 0x93, 0x2c, 0x42, 0xf4, 0x11, 0x98, 0x91, 0x21, 0xe3, 0x51, 0xd0,
0x97, 0xac, 0xac, 0xe3, 0xeb, 0x39, 0x1a, 0x5f, 0x09, 0xe4, 0x70, 0xff, 0xcf, 0x1d, 0x71, 0x79,
0xc3, 0x64, 0xe4, 0x0f, 0x26, 0xa3, 0x90, 0xba, 0x74, 0xc4, 0xa5, 0x7e, 0x20, 0x0c, 0xbf, 0xe9,
0x7f, 0x90, 0xfb, 0xb9, 0x63, 0x1c, 0xa0, 0xff, 0x03, 0xe8, 0xea, 0xc2, 0xbb, 0xe2, 0xe8, 0xfa,
0x24, 0xcb, 0x61, 0x64, 0xe8, 0x5d, 0x71, 0xda, 0x85, 0xdc, 0xfa, 0x89, 0xa3, 0xc5, 0xff, 0x76,
0x97, 0xb1, 0xac, 0x31, 0x81, 0xed, 0x21, 0x97, 0x72, 0xc6, 0xe7, 0xdc, 0x97, 0x8c, 0xbf, 0x5b,
0x72, 0x21, 0x69, 0x73, 0xf5, 0x1c, 0x08, 0x16, 0xa5, 0x2b, 0xdf, 0xc7, 0x3f, 0x8c, 0xd5, 0x13,
0x79, 0x08, 0x69, 0xcc, 0xe1, 0x50, 0xf9, 0x76, 0xf1, 0x06, 0x93, 0xe9, 0x5c, 0xe3, 0x33, 0x01,
0xba, 0xd9, 0x44, 0x2c, 0x02, 0x5f, 0xf0, 0xfb, 0xec, 0xf1, 0x08, 0x0c, 0x21, 0x1d, 0xb9, 0x14,
0xd8, 0xb7, 0xd4, 0x7e, 0xb0, 0xea, 0xfb, 0x6b, 0x9b, 0xd6, 0x10, 0x89, 0x2c, 0x12, 0x34, 0x0e,
0xc1, 0xd0, 0x11, 0x9a, 0x87, 0x8c, 0x7d, 0xfa, 0xaa, 0x73, 0x62, 0xf7, 0xcd, 0x2d, 0x5a, 0x80,
0x6c, 0xa7, 0xd7, 0xb3, 0x06, 0x23, 0xab, 0x6f, 0x12, 0x85, 0x98, 0xf5, 0xc2, 0xea, 0x29, 0x94,
0xd8, 0x9b, 0x42, 0x7e, 0xe3, 0xc5, 0xdf, 0xd4, 0x65, 0x20, 0x39, 0x38, 0x1f, 0x99, 0x44, 0x7d,
0x1c, 0x5b, 0x23, 0x33, 0x41, 0x8b, 0x90, 0x3b, 0xb6, 0x46, 0xe3, 0xce, 0x79, 0xdf, 0x1e, 0x99,
0x49, 0x5a, 0x02, 0x50, 0x90, 0x59, 0x83, 0x8e, 0xcd, 0xcc, 0x94, 0xc2, 0x83, 0xf3, 0x35, 0x4e,
0x53, 0x00, 0xa3, 0x6f, 0x9d, 0x58, 0x23, 0xcb, 0x34, 0xda, 0x43, 0x30, 0xf0, 0xe2, 0x04, 0xb5,
0x01, 0xe2, 0x51, 0xe8, 0xbf, 0xb7, 0x8d, 0x87, 0xab, 0xaa, 0x56, 0x7f, 0x3f, 0x79, 0x63, 0xab,
0x49, 0x1e, 0x93, 0x6e, 0xea, 0x75, 0x62, 0x31, 0x99, 0x18, 0x68, 0x88, 0x27, 0x3f, 0x02, 0x00,
0x00, 0xff, 0xff, 0x68, 0x6d, 0x5d, 0xe7, 0x47, 0x06, 0x00, 0x00,
// 775 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x4d, 0x6f, 0xf3, 0x44,
0x10, 0xc7, 0xeb, 0xbc, 0x38, 0xc9, 0xe4, 0xcd, 0xd9, 0xa7, 0xaa, 0x42, 0x04, 0x4a, 0x08, 0x97,
0x50, 0xa4, 0x94, 0x06, 0x09, 0xa9, 0x12, 0x42, 0xca, 0x8b, 0x55, 0x4c, 0xab, 0x36, 0xda, 0x24,
0x1c, 0xb8, 0x44, 0x4e, 0xbc, 0xa4, 0x56, 0x1d, 0xdb, 0x78, 0xd7, 0x12, 0xed, 0x15, 0x71, 0xe7,
0x0b, 0x71, 0xe7, 0xc0, 0x27, 0xe0, 0x50, 0xbe, 0x0a, 0xda, 0xf1, 0x5b, 0x0a, 0x45, 0x3c, 0x55,
0x6f, 0x9e, 0x9d, 0xf9, 0xcd, 0xec, 0xec, 0xfc, 0xc7, 0x50, 0xf3, 0x02, 0x8b, 0x05, 0x7c, 0xe8,
0x07, 0x9e, 0xf0, 0x88, 0x1a, 0x59, 0x1d, 0xd8, 0x79, 0x3b, 0x2f, 0x3a, 0xeb, 0x74, 0x77, 0x9e,
0xb7, 0x73, 0xd8, 0x19, 0x5a, 0x9b, 0xf0, 0x87, 0x33, 0x61, 0xef, 0x19, 0x17, 0xe6, 0xde, 0x8f,
0x03, 0xc0, 0xf5, 0x2c, 0x16, 0x7d, 0xf7, 0x7f, 0x56, 0x01, 0x6e, 0x65, 0x8e, 0x6b, 0x7b, 0x6f,
0x0b, 0x72, 0x01, 0x75, 0xce, 0x02, 0xdb, 0x74, 0xd6, 0x6e, 0xb8, 0xdf, 0xb0, 0xa0, 0xad, 0xf4,
0x94, 0x41, 0x6d, 0x72, 0xfc, 0xfb, 0x53, 0xf7, 0xe8, 0xcf, 0xa7, 0x6e, 0x6d, 0x81, 0xce, 0x1b,
0xf4, 0xd1, 0x1a, 0x3f, 0xb0, 0xc8, 0x39, 0xd4, 0xb8, 0x29, 0x98, 0xe3, 0xd8, 0x82, 0xad, 0x6d,
0xab, 0x9d, 0x43, 0xb2, 0x11, 0x93, 0xea, 0x8d, 0x67, 0x31, 0x63, 0x46, 0xab, 0x69, 0x8c, 0x61,
0x91, 0xaf, 0xe0, 0xd8, 0x62, 0x7e, 0xc0, 0xb6, 0xa6, 0x60, 0xd6, 0x3a, 0xf4, 0x1d, 0xdb, 0xbd,
0x97, 0x68, 0x1e, 0x51, 0x38, 0xc0, 0x48, 0x16, 0xb7, 0xc2, 0x30, 0xc3, 0x22, 0x13, 0x68, 0xc5,
0x88, 0x1f, 0x6e, 0x1c, 0x7b, 0xbb, 0xbe, 0x67, 0x0f, 0xed, 0x3a, 0xa2, 0x27, 0x71, 0xd5, 0xc6,
0xdc, 0x66, 0x5b, 0x36, 0x47, 0xf7, 0x15, 0x7b, 0xa0, 0xcd, 0x08, 0x48, 0x0f, 0xc8, 0x97, 0xd0,
0xe4, 0xc2, 0x0b, 0xcc, 0x1d, 0x5b, 0xcb, 0x47, 0x91, 0xc5, 0x0b, 0x2f, 0xde, 0xbb, 0x1e, 0x87,
0xa1, 0x69, 0x91, 0x53, 0x28, 0xfb, 0x32, 0xb5, 0x04, 0x8a, 0x08, 0x34, 0x63, 0xa0, 0x84, 0x25,
0x8d, 0x19, 0x2d, 0x61, 0x80, 0x61, 0x91, 0x63, 0x28, 0x3a, 0xf2, 0x71, 0xdb, 0x6a, 0x4f, 0x19,
0xe4, 0x69, 0x64, 0x90, 0xcf, 0x40, 0x35, 0xb7, 0xc2, 0xf6, 0xdc, 0x76, 0xa9, 0xa7, 0x0c, 0x1a,
0xa3, 0x77, 0xc3, 0x78, 0xb0, 0xc8, 0x8f, 0xd1, 0x45, 0xe3, 0x10, 0x72, 0x0b, 0x5a, 0x54, 0x8e,
0xfd, 0xe4, 0xdb, 0x81, 0x89, 0x58, 0xb9, 0xa7, 0x0c, 0xaa, 0xa3, 0xce, 0x30, 0x9a, 0xf6, 0x30,
0x99, 0xf6, 0x70, 0x99, 0x4c, 0x7b, 0x52, 0x96, 0x57, 0xfa, 0xf5, 0xaf, 0xae, 0x42, 0x9b, 0x48,
0xeb, 0x29, 0x2c, 0x13, 0x62, 0xb9, 0xc3, 0x84, 0x95, 0xd7, 0x24, 0x44, 0xfa, 0x20, 0xe1, 0x15,
0x34, 0xa2, 0x84, 0xdb, 0x80, 0x45, 0xe9, 0x6a, 0xaf, 0x48, 0x57, 0x47, 0x76, 0x1a, 0xa3, 0xe4,
0x0c, 0xde, 0x65, 0x52, 0xe2, 0xf6, 0xce, 0x35, 0x45, 0x18, 0xb0, 0x36, 0xc8, 0x87, 0xa6, 0x24,
0x75, 0x2d, 0x12, 0x0f, 0xf9, 0x1a, 0x5a, 0x19, 0x60, 0x5a, 0x56, 0xc0, 0x38, 0x6f, 0x57, 0xf1,
0x02, 0xad, 0x21, 0xaa, 0x5d, 0xce, 0x6d, 0x1c, 0x39, 0xa8, 0x96, 0xc6, 0xc6, 0x27, 0xfd, 0x5f,
0x14, 0x28, 0xe2, 0x16, 0xbc, 0x65, 0x01, 0x4e, 0x40, 0x35, 0xf7, 0x5e, 0xe8, 0x0a, 0x94, 0x7e,
0x9e, 0xc6, 0x16, 0xf9, 0x14, 0xb4, 0x58, 0xa7, 0x59, 0x2b, 0xa8, 0xf0, 0x44, 0x8e, 0x69, 0x1f,
0xfd, 0x3f, 0x14, 0xa8, 0xe0, 0xfc, 0xbf, 0x31, 0xf9, 0xdd, 0x33, 0x91, 0x29, 0xff, 0x23, 0x32,
0x02, 0x85, 0x3b, 0x93, 0xdf, 0x45, 0x5b, 0x47, 0xf1, 0x9b, 0x7c, 0x04, 0x10, 0xf1, 0xdc, 0x7e,
0x64, 0xa8, 0xeb, 0x3c, 0xad, 0xe0, 0xc9, 0xc2, 0x7e, 0x64, 0x64, 0x02, 0x95, 0xf4, 0xcf, 0x80,
0x22, 0x7e, 0xdf, 0x69, 0x65, 0x18, 0xf9, 0x10, 0x2a, 0xff, 0x6c, 0x2a, 0x3b, 0xe8, 0x6f, 0xa0,
0xb5, 0x60, 0x42, 0x38, 0x6c, 0xcf, 0x5c, 0x41, 0xd9, 0x8f, 0x21, 0xe3, 0x82, 0x0c, 0x92, 0x75,
0x50, 0xb0, 0x24, 0x49, 0x74, 0x9f, 0xfd, 0x85, 0x92, 0x15, 0xf9, 0x04, 0x8a, 0xe8, 0xc3, 0xa6,
0xaa, 0xa3, 0xfa, 0xb3, 0x48, 0x1a, 0xf9, 0xfa, 0xbf, 0x29, 0x40, 0x0e, 0x8b, 0x70, 0xdf, 0x73,
0x39, 0x7b, 0xcb, 0x1c, 0x2f, 0x40, 0xe5, 0xc2, 0x14, 0x21, 0xc7, 0xba, 0x8d, 0xd1, 0xc7, 0x49,
0xdd, 0x7f, 0x97, 0x19, 0x2e, 0x30, 0x90, 0xc6, 0x40, 0xff, 0x1c, 0xd4, 0xe8, 0x84, 0x54, 0xa1,
0x64, 0xdc, 0x7c, 0x37, 0xbe, 0x36, 0x66, 0xda, 0x11, 0xa9, 0x41, 0x79, 0x3c, 0x9d, 0xea, 0xf3,
0xa5, 0x3e, 0xd3, 0x14, 0x69, 0x51, 0xfd, 0x5b, 0x7d, 0x2a, 0xad, 0xdc, 0xe9, 0x0e, 0xaa, 0x07,
0x1b, 0xff, 0x9c, 0x2b, 0x41, 0x7e, 0xbe, 0x5a, 0x6a, 0x8a, 0xfc, 0xb8, 0xd4, 0x97, 0x5a, 0x8e,
0xd4, 0xa1, 0x72, 0xa9, 0x2f, 0xd7, 0xe3, 0xd5, 0xcc, 0x58, 0x6a, 0x79, 0xd2, 0x00, 0x90, 0x26,
0xd5, 0xe7, 0x63, 0x83, 0x6a, 0x05, 0x69, 0xcf, 0x57, 0xa9, 0x5d, 0x24, 0x00, 0xea, 0x4c, 0xbf,
0xd6, 0x97, 0xba, 0xa6, 0x8e, 0x16, 0xa0, 0xe2, 0xc3, 0x71, 0x62, 0x00, 0x64, 0xad, 0x90, 0x0f,
0x5e, 0x6a, 0x0f, 0x47, 0xd5, 0xe9, 0xfc, 0x77, 0xe7, 0xfd, 0xa3, 0x81, 0xf2, 0xb9, 0x32, 0x29,
0x7c, 0x9f, 0xf3, 0x37, 0x1b, 0x15, 0xe5, 0xf2, 0xc5, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa1,
0x64, 0x18, 0x84, 0x9c, 0x06, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -28,8 +28,10 @@ message OrderLimit {
// satellite who issued this order limit allowing orderer to do the specified action
bytes satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
// uplink who requested or whom behalf the order limit to do an action
bytes uplink_id = 3 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
// storage node who can reclaim the order limit specified by serial
bytes deprecated_uplink_id = 3 [(gogoproto.customtype) = "NodeID"];
// public key that will be used to sign orders and piece hash
bytes uplink_public_key = 13 [(gogoproto.customtype) = "PiecePublicKey", (gogoproto.nullable) = false];
// storage node who can re claimthe order limit specified by serial
bytes storage_node_id = 4 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
// piece which is allowed to be touched
@ -43,7 +45,6 @@ message OrderLimit {
google.protobuf.Timestamp order_creation = 12 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
bytes satellite_signature = 10;
// satellites aren't necessarily discoverable in kademlia. this allows
// a storage node to find a satellite and handshake with it to get its key.
node.NodeAddress satellite_address = 11;
@ -64,15 +65,14 @@ message PieceHash {
bytes piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
// hash of the piece that was/is uploaded
bytes hash = 2;
// signature either satellite or storage node
bytes signature = 3;
// size of uploaded piece
int64 piece_size = 4;
// timestamp when upload occur
// timestamp when upload occurred
google.protobuf.Timestamp timestamp = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// signature either satellite or storage node
bytes signature = 3;
}
service Orders {
rpc Settlement(stream SettlementRequest) returns (stream SettlementResponse) {}
}

View File

@ -17,6 +17,12 @@ type NodeIDList = storj.NodeIDList
// PieceID is an alias to storj.PieceID for use in generated protobuf code
type PieceID = storj.PieceID
// PiecePublicKey is an alias to storj.PiecePublicKey for use in generated protobuf code
type PiecePublicKey = storj.PiecePublicKey
// PiecePrivateKey is an alias to storj.PiecePrivateKey for use in generated protobuf code
type PiecePrivateKey = storj.PiecePrivateKey
// SerialNumber is an alias to storj.SerialNumber for use in generated protobuf code
type SerialNumber = storj.SerialNumber

View File

@ -16,7 +16,6 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/ranger"
@ -29,10 +28,10 @@ var mon = monkit.Package()
// Client defines an interface for storing erasure coded data to piece store nodes
type Client interface {
Put(ctx context.Context, limits []*pb.AddressedOrderLimit, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error)
Repair(ctx context.Context, limits []*pb.AddressedOrderLimit, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time, timeout time.Duration, path storj.Path) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error)
Get(ctx context.Context, limits []*pb.AddressedOrderLimit, es eestream.ErasureScheme, size int64) (ranger.Ranger, error)
Delete(ctx context.Context, limits []*pb.AddressedOrderLimit) error
Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error)
Repair(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time, timeout time.Duration, path storj.Path) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error)
Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (ranger.Ranger, error)
Delete(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey) error
WithForceErrorDetection(force bool) Client
}
@ -61,11 +60,10 @@ func (ec *ecClient) WithForceErrorDetection(force bool) Client {
func (ec *ecClient) dialPiecestore(ctx context.Context, n *pb.Node) (*piecestore.Client, error) {
logger := ec.log.Named(n.Id.String())
signer := signing.SignerFromFullIdentity(ec.transport.Identity())
return piecestore.Dial(ctx, ec.transport, n, logger, signer, piecestore.DefaultConfig)
return piecestore.Dial(ctx, ec.transport, n, logger, piecestore.DefaultConfig)
}
func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) {
func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
if len(limits) != rs.TotalCount() {
@ -102,7 +100,7 @@ func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, r
for i, addressedLimit := range limits {
go func(i int, addressedLimit *pb.AddressedOrderLimit) {
hash, err := ec.putPiece(psCtx, ctx, addressedLimit, readers[i], expiration)
hash, err := ec.putPiece(psCtx, ctx, addressedLimit, privateKey, readers[i], expiration)
infos <- info{i: i, err: err, hash: hash}
}(i, addressedLimit)
}
@ -161,7 +159,7 @@ func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, r
return successfulNodes, successfulHashes, nil
}
func (ec *ecClient) Repair(ctx context.Context, limits []*pb.AddressedOrderLimit, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time, timeout time.Duration, path storj.Path) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) {
func (ec *ecClient) Repair(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time, timeout time.Duration, path storj.Path) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
if len(limits) != rs.TotalCount() {
@ -190,7 +188,7 @@ func (ec *ecClient) Repair(ctx context.Context, limits []*pb.AddressedOrderLimit
for i, addressedLimit := range limits {
go func(i int, addressedLimit *pb.AddressedOrderLimit) {
hash, err := ec.putPiece(psCtx, ctx, addressedLimit, readers[i], expiration)
hash, err := ec.putPiece(psCtx, ctx, addressedLimit, privateKey, readers[i], expiration)
infos <- info{i: i, err: err, hash: hash}
}(i, addressedLimit)
}
@ -253,7 +251,7 @@ func (ec *ecClient) Repair(ctx context.Context, limits []*pb.AddressedOrderLimit
return successfulNodes, successfulHashes, nil
}
func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error) {
func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error) {
nodeName := "nil"
if limit != nil {
nodeName = limit.GetLimit().StorageNodeId.String()[0:8]
@ -278,7 +276,7 @@ func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrd
}
defer func() { err = errs.Combine(err, ps.Close()) }()
upload, err := ps.Upload(ctx, limit.GetLimit())
upload, err := ps.Upload(ctx, limit.GetLimit(), privateKey)
if err != nil {
ec.log.Sugar().Debugf("Failed requesting upload of piece %s to node %s: %v", pieceID, storageNodeID, err)
return nil, err
@ -315,7 +313,7 @@ func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrd
return hash, err
}
func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, es eestream.ErasureScheme, size int64) (rr ranger.Ranger, err error) {
func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
if len(limits) != es.TotalCount() {
@ -338,6 +336,7 @@ func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, e
rrs[i] = &lazyPieceRanger{
dialPiecestore: ec.dialPiecestore,
limit: addressedLimit,
privateKey: privateKey,
size: pieceSize,
}
}
@ -350,7 +349,7 @@ func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, e
return eestream.Unpad(rr, int(paddedSize-size))
}
func (ec *ecClient) Delete(ctx context.Context, limits []*pb.AddressedOrderLimit) (err error) {
func (ec *ecClient) Delete(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey) (err error) {
defer mon.Task()(&ctx)(&err)
errch := make(chan error, len(limits))
@ -371,7 +370,7 @@ func (ec *ecClient) Delete(ctx context.Context, limits []*pb.AddressedOrderLimit
errch <- err
return
}
err = ps.Delete(ctx, limit)
err = ps.Delete(ctx, limit, privateKey)
err = errs.Combine(err, ps.Close())
if err != nil {
ec.log.Sugar().Errorf("Failed deleting piece %s from node %s: %v", limit.PieceId, limit.StorageNodeId, err)
@ -433,6 +432,7 @@ func calcPadded(size int64, blockSize int) int64 {
type lazyPieceRanger struct {
dialPiecestore dialPiecestoreFunc
limit *pb.AddressedOrderLimit
privateKey storj.PiecePrivateKey
size int64
}
@ -452,7 +452,7 @@ func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (_ i
return nil, err
}
download, err := ps.Download(ctx, lr.limit.GetLimit(), offset, length)
download, err := ps.Download(ctx, lr.limit.GetLimit(), lr.privateKey, offset, length)
if err != nil {
return nil, errs.Combine(err, ps.Close())
}

View File

@ -70,10 +70,12 @@ func TestECClient(t *testing.T) {
}
func testPut(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, rs eestream.RedundancyStrategy, data []byte) ([]*pb.Node, []*pb.PieceHash) {
var err error
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
limits := make([]*pb.AddressedOrderLimit, rs.TotalCount())
for i := 0; i < len(limits); i++ {
limits[i], err = newAddressedOrderLimit(ctx, pb.PieceAction_PUT, planet.Satellites[0], planet.Uplinks[0], planet.StorageNodes[i], storj.NewPieceID())
limits[i], err = newAddressedOrderLimit(ctx, pb.PieceAction_PUT, planet.Satellites[0], piecePublicKey, planet.StorageNodes[i], storj.NewPieceID())
require.NoError(t, err)
}
@ -81,7 +83,7 @@ func testPut(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ec
r := bytes.NewReader(data)
successfulNodes, successfulHashes, err := ec.Put(ctx, limits, rs, r, ttl)
successfulNodes, successfulHashes, err := ec.Put(ctx, limits, piecePrivateKey, rs, r, ttl)
require.NoError(t, err)
assert.Equal(t, len(limits), len(successfulNodes))
@ -109,16 +111,18 @@ func testPut(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ec
}
func testGet(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, es eestream.ErasureScheme, data []byte, successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash) {
var err error
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
limits := make([]*pb.AddressedOrderLimit, es.TotalCount())
for i := 0; i < len(limits); i++ {
if successfulNodes[i] != nil {
limits[i], err = newAddressedOrderLimit(ctx, pb.PieceAction_GET, planet.Satellites[0], planet.Uplinks[0], planet.StorageNodes[i], successfulHashes[i].PieceId)
limits[i], err = newAddressedOrderLimit(ctx, pb.PieceAction_GET, planet.Satellites[0], piecePublicKey, planet.StorageNodes[i], successfulHashes[i].PieceId)
require.NoError(t, err)
}
}
rr, err := ec.Get(ctx, limits, es, dataSize.Int64())
rr, err := ec.Get(ctx, limits, piecePrivateKey, es, dataSize.Int64())
require.NoError(t, err)
r, err := rr.Range(ctx, 0, rr.Size())
@ -131,21 +135,23 @@ func testGet(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ec
}
func testDelete(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash) {
var err error
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
limits := make([]*pb.AddressedOrderLimit, len(successfulNodes))
for i := 0; i < len(limits); i++ {
if successfulNodes[i] != nil {
limits[i], err = newAddressedOrderLimit(ctx, pb.PieceAction_DELETE, planet.Satellites[0], planet.Uplinks[0], planet.StorageNodes[i], successfulHashes[i].PieceId)
limits[i], err = newAddressedOrderLimit(ctx, pb.PieceAction_DELETE, planet.Satellites[0], piecePublicKey, planet.StorageNodes[i], successfulHashes[i].PieceId)
require.NoError(t, err)
}
}
err = ec.Delete(ctx, limits)
err = ec.Delete(ctx, limits, piecePrivateKey)
require.NoError(t, err)
}
func newAddressedOrderLimit(ctx context.Context, action pb.PieceAction, satellite *satellite.Peer, uplink *testplanet.Uplink, storageNode *storagenode.Peer, pieceID storj.PieceID) (*pb.AddressedOrderLimit, error) {
func newAddressedOrderLimit(ctx context.Context, action pb.PieceAction, satellite *satellite.Peer, piecePublicKey storj.PiecePublicKey, storageNode *storagenode.Peer, pieceID storj.PieceID) (*pb.AddressedOrderLimit, error) {
// TODO refactor to avoid OrderLimit duplication
serialNumber := testrand.SerialNumber()
@ -154,7 +160,7 @@ func newAddressedOrderLimit(ctx context.Context, action pb.PieceAction, satellit
limit := &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkId: uplink.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storageNode.ID(),
PieceId: pieceID,
Action: action,

View File

@ -117,7 +117,7 @@ func (repairer *Repairer) Repair(ctx context.Context, path storj.Path) (err erro
}
// Create the order limits for the GET_REPAIR action
getOrderLimits, err := repairer.orders.CreateGetRepairOrderLimits(ctx, repairer.identity.PeerIdentity(), bucketID, pointer, healthyPieces)
getOrderLimits, getPrivateKey, err := repairer.orders.CreateGetRepairOrderLimits(ctx, bucketID, pointer, healthyPieces)
if err != nil {
return Error.Wrap(err)
}
@ -135,13 +135,13 @@ func (repairer *Repairer) Repair(ctx context.Context, path storj.Path) (err erro
}
// Create the order limits for the PUT_REPAIR action
putLimits, err := repairer.orders.CreatePutRepairOrderLimits(ctx, repairer.identity.PeerIdentity(), bucketID, pointer, getOrderLimits, newNodes)
putLimits, putPrivateKey, err := repairer.orders.CreatePutRepairOrderLimits(ctx, bucketID, pointer, getOrderLimits, newNodes)
if err != nil {
return Error.Wrap(err)
}
// Download the segment using just the healthy pieces
rr, err := repairer.ec.Get(ctx, getOrderLimits, redundancy, pointer.GetSegmentSize())
rr, err := repairer.ec.Get(ctx, getOrderLimits, getPrivateKey, redundancy, pointer.GetSegmentSize())
if err != nil {
return Error.Wrap(err)
}
@ -153,7 +153,7 @@ func (repairer *Repairer) Repair(ctx context.Context, path storj.Path) (err erro
defer func() { err = errs.Combine(err, r.Close()) }()
// Upload the repaired pieces
successfulNodes, hashes, err := repairer.ec.Repair(ctx, putLimits, redundancy, r, expiration, repairer.timeout, path)
successfulNodes, hashes, err := repairer.ec.Repair(ctx, putLimits, putPrivateKey, redundancy, r, expiration, repairer.timeout, path)
if err != nil {
return Error.Wrap(err)
}

View File

@ -134,14 +134,14 @@ func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.
}
// path and segment index are not known at this point
limits, rootPieceID, err := s.metainfo.CreateSegment(ctx, bucket, objectPath, -1, redundancy, s.maxEncryptedSegmentSize, expiration)
limits, rootPieceID, piecePrivateKey, err := s.metainfo.CreateSegment(ctx, bucket, objectPath, -1, redundancy, s.maxEncryptedSegmentSize, expiration)
if err != nil {
return Meta{}, Error.Wrap(err)
}
sizedReader := SizeReader(peekReader)
successfulNodes, successfulHashes, err := s.ec.Put(ctx, limits, s.rs, sizedReader, expiration)
successfulNodes, successfulHashes, err := s.ec.Put(ctx, limits, piecePrivateKey, s.rs, sizedReader, expiration)
if err != nil {
return Meta{}, Error.Wrap(err)
}
@ -185,7 +185,7 @@ func (s *segmentStore) Get(ctx context.Context, path storj.Path) (rr ranger.Rang
return nil, Meta{}, err
}
pointer, limits, err := s.metainfo.ReadSegment(ctx, bucket, objectPath, segmentIndex)
pointer, limits, piecePrivateKey, err := s.metainfo.ReadSegment(ctx, bucket, objectPath, segmentIndex)
if err != nil {
return nil, Meta{}, Error.Wrap(err)
}
@ -216,7 +216,7 @@ func (s *segmentStore) Get(ctx context.Context, path storj.Path) (rr ranger.Rang
return nil, Meta{}, err
}
rr, err = s.ec.Get(ctx, selected, redundancy, pointer.GetSegmentSize())
rr, err = s.ec.Get(ctx, selected, piecePrivateKey, redundancy, pointer.GetSegmentSize())
if err != nil {
return nil, Meta{}, Error.Wrap(err)
}
@ -277,7 +277,7 @@ func (s *segmentStore) Delete(ctx context.Context, path storj.Path) (err error)
return err
}
limits, err := s.metainfo.DeleteSegment(ctx, bucket, objectPath, segmentIndex)
limits, privateKey, err := s.metainfo.DeleteSegment(ctx, bucket, objectPath, segmentIndex)
if err != nil {
return Error.Wrap(err)
}
@ -288,7 +288,7 @@ func (s *segmentStore) Delete(ctx context.Context, path storj.Path) (err error)
}
// remote segment - delete the pieces from storage nodes
err = s.ec.Delete(ctx, limits)
err = s.ec.Delete(ctx, limits, privateKey)
if err != nil {
return Error.Wrap(err)
}

159
pkg/storj/piecekey.go Normal file
View File

@ -0,0 +1,159 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package storj
import (
"database/sql/driver"
"github.com/zeebo/errs"
"golang.org/x/crypto/ed25519"
)
// ErrPieceKey is the error class wrapping all piece key failures:
// generation, parsing from bytes, signing, verification, and DB scanning.
var ErrPieceKey = errs.Class("piece key error")
// PiecePublicKey is the ed25519 public half of a piece key pair. It is used
// to verify signatures produced with the matching PiecePrivateKey (see
// Verify), and implements gogo's custom type and database/sql interfaces so
// it can be embedded in protobuf messages and stored as raw bytes.
type PiecePublicKey struct {
	pub ed25519.PublicKey
}
// PiecePrivateKey is the ed25519 private half of a piece key pair. It signs
// messages (see Sign) that can be verified with the matching PiecePublicKey,
// and implements gogo's custom type and database/sql interfaces.
type PiecePrivateKey struct {
	priv ed25519.PrivateKey
}
// NewPieceKey generates a fresh ed25519 piece key pair using the default
// (crypto/rand) entropy source. Any generation failure is wrapped in
// ErrPieceKey.
func NewPieceKey() (PiecePublicKey, PiecePrivateKey, error) {
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		return PiecePublicKey{}, PiecePrivateKey{}, ErrPieceKey.Wrap(err)
	}
	return PiecePublicKey{pub}, PiecePrivateKey{priv}, nil
}
// PiecePublicKeyFromBytes parses data as an ed25519 public key and wraps it
// in a PiecePublicKey. It fails unless data is exactly
// ed25519.PublicKeySize bytes long.
func PiecePublicKeyFromBytes(data []byte) (PiecePublicKey, error) {
	if len(data) == ed25519.PublicKeySize {
		return PiecePublicKey{ed25519.PublicKey(data)}, nil
	}
	return PiecePublicKey{}, ErrPieceKey.New("invalid public key length %v", len(data))
}
// PiecePrivateKeyFromBytes parses data as an ed25519 private key and wraps it
// in a PiecePrivateKey. It fails unless data is exactly
// ed25519.PrivateKeySize bytes long.
func PiecePrivateKeyFromBytes(data []byte) (PiecePrivateKey, error) {
	if len(data) == ed25519.PrivateKeySize {
		return PiecePrivateKey{ed25519.PrivateKey(data)}, nil
	}
	return PiecePrivateKey{}, ErrPieceKey.New("invalid private key length %v", len(data))
}
// Sign produces an ed25519 signature of data using the private key. It
// returns an error when the key is empty or otherwise not a well-formed
// ed25519 private key.
func (key PiecePrivateKey) Sign(data []byte) ([]byte, error) {
	if len(key.priv) == ed25519.PrivateKeySize {
		return ed25519.Sign(key.priv, data), nil
	}
	return nil, ErrPieceKey.New("invalid private key length %v", len(key.priv))
}
// Verify checks that signature is a valid ed25519 signature of data under
// this public key. A nil return means the signature is valid; a malformed
// key or a mismatched signature yields an ErrPieceKey error.
func (key PiecePublicKey) Verify(data, signature []byte) error {
	if len(key.pub) != ed25519.PublicKeySize {
		return ErrPieceKey.New("invalid public key length %v", len(key.pub))
	}
	if ed25519.Verify(key.pub, data, signature) {
		return nil
	}
	return ErrPieceKey.New("invalid signature")
}
// Bytes returns the raw byte representation of the piece public key.
func (key PiecePublicKey) Bytes() []byte {
	return key.pub
}
// Bytes returns the raw byte representation of the piece private key.
func (key PiecePrivateKey) Bytes() []byte {
	return key.priv
}
// IsZero reports whether the public key has not been set.
func (key PiecePublicKey) IsZero() bool {
	return len(key.pub) == 0
}
// IsZero reports whether the private key has not been set.
func (key PiecePrivateKey) IsZero() bool {
	return len(key.priv) == 0
}
// Marshal returns the serialized form of the piece public key
// (implements gogo's custom type interface).
func (key PiecePublicKey) Marshal() ([]byte, error) {
	return key.Bytes(), nil
}
// Marshal returns the serialized form of the piece private key
// (implements gogo's custom type interface).
func (key PiecePrivateKey) Marshal() ([]byte, error) {
	return key.Bytes(), nil
}
// MarshalTo copies the piece public key into data and reports how many bytes
// were written (implements gogo's custom type interface).
func (key *PiecePublicKey) MarshalTo(data []byte) (n int, err error) {
	return copy(data, key.Bytes()), nil
}
// MarshalTo copies the piece private key into data and reports how many bytes
// were written (implements gogo's custom type interface).
func (key *PiecePrivateKey) MarshalTo(data []byte) (n int, err error) {
	return copy(data, key.Bytes()), nil
}
// Unmarshal deserializes a piece public key (implements gogo's custom type
// interface). An empty input is allowed and resets the key to its zero value.
func (key *PiecePublicKey) Unmarshal(data []byte) error {
	// empty buffers reset the key rather than erroring
	if len(data) == 0 {
		key.pub = nil
		return nil
	}
	parsed, err := PiecePublicKeyFromBytes(data)
	*key = parsed
	return err
}
// Unmarshal deserializes a piece private key (implements gogo's custom type
// interface). An empty input is allowed and resets the key to its zero value.
func (key *PiecePrivateKey) Unmarshal(data []byte) error {
	// allow empty keys: reset to the zero value instead of erroring
	if len(data) == 0 {
		key.priv = nil
		return nil
	}
	var err error
	*key, err = PiecePrivateKeyFromBytes(data)
	return err
}
// Size returns the number of bytes in the piece public key
// (implements gogo's custom type interface).
func (key *PiecePublicKey) Size() int {
	return len(key.pub)
}
// Size returns the number of bytes in the piece private key
// (implements gogo's custom type interface).
func (key *PiecePrivateKey) Size() int {
	return len(key.priv)
}
// Value implements driver.Valuer so a PiecePublicKey can be written to a
// database field as raw bytes.
func (key PiecePublicKey) Value() (driver.Value, error) { return key.Bytes(), nil }
// Value implements driver.Valuer so a PiecePrivateKey can be written to a
// database field as raw bytes.
func (key PiecePrivateKey) Value() (driver.Value, error) {
	return key.Bytes(), nil
}
// Scan implements sql.Scanner, extracting a PiecePublicKey from a database
// field. Only []byte sources are accepted.
func (key *PiecePublicKey) Scan(src interface{}) (err error) {
	raw, ok := src.([]byte)
	if !ok {
		return ErrPieceKey.New("PiecePublicKey Scan expects []byte")
	}
	*key, err = PiecePublicKeyFromBytes(raw)
	return err
}
// Scan implements sql.Scanner, extracting a PiecePrivateKey from a database
// field. Only []byte sources are accepted.
func (key *PiecePrivateKey) Scan(src interface{}) (err error) {
	raw, ok := src.([]byte)
	if !ok {
		return ErrPieceKey.New("PiecePrivateKey Scan expects []byte")
	}
	*key, err = PiecePrivateKeyFromBytes(raw)
	return err
}

View File

@ -1855,6 +1855,21 @@
"value": "false"
}
]
},
{
"id": 3,
"name": "private_key",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "PiecePrivateKey"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
}
]
},
@ -1932,6 +1947,21 @@
"id": 2,
"name": "pointer",
"type": "pointerdb.Pointer"
},
{
"id": 3,
"name": "private_key",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "PiecePrivateKey"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
}
]
},
@ -1993,6 +2023,21 @@
"name": "addressed_limits",
"type": "AddressedOrderLimit",
"is_repeated": true
},
{
"id": 2,
"name": "private_key",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "PiecePrivateKey"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
}
]
},
@ -2699,12 +2744,23 @@
},
{
"id": 3,
"name": "uplink_id",
"name": "deprecated_uplink_id",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "NodeID"
}
]
},
{
"id": 13,
"name": "uplink_public_key",
"type": "bytes",
"options": [
{
"name": "(gogoproto.customtype)",
"value": "PiecePublicKey"
},
{
"name": "(gogoproto.nullable)",
@ -2862,11 +2918,6 @@
"name": "hash",
"type": "bytes"
},
{
"id": 3,
"name": "signature",
"type": "bytes"
},
{
"id": 4,
"name": "piece_size",
@ -2886,6 +2937,11 @@
"value": "false"
}
]
},
{
"id": 3,
"name": "signature",
"type": "bytes"
}
]
},

View File

@ -20,7 +20,6 @@ import (
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
@ -181,13 +180,8 @@ func (endpoint *Endpoint) CreateSegmentOld(ctx context.Context, req *pb.SegmentW
return nil, status.Errorf(codes.Internal, err.Error())
}
uplinkIdentity, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
bucketID := createBucketID(keyInfo.ProjectID, req.Bucket)
rootPieceID, addressedLimits, err := endpoint.orders.CreatePutOrderLimits(ctx, uplinkIdentity, bucketID, nodes, req.Expiration, maxPieceSize)
rootPieceID, addressedLimits, piecePrivateKey, err := endpoint.orders.CreatePutOrderLimits(ctx, bucketID, nodes, req.Expiration, maxPieceSize)
if err != nil {
return nil, Error.Wrap(err)
}
@ -199,7 +193,7 @@ func (endpoint *Endpoint) CreateSegmentOld(ctx context.Context, req *pb.SegmentW
})
}
return &pb.SegmentWriteResponseOld{AddressedLimits: addressedLimits, RootPieceId: rootPieceID}, nil
return &pb.SegmentWriteResponseOld{AddressedLimits: addressedLimits, RootPieceId: rootPieceID, PrivateKey: piecePrivateKey}, nil
}
func calculateSpaceUsed(ptr *pb.Pointer) (inlineSpace, remoteSpace int64) {
@ -365,16 +359,11 @@ func (endpoint *Endpoint) DownloadSegmentOld(ctx context.Context, req *pb.Segmen
}
return &pb.SegmentDownloadResponseOld{Pointer: pointer}, nil
} else if pointer.Type == pb.Pointer_REMOTE && pointer.Remote != nil {
uplinkIdentity, err := identity.PeerIdentityFromContext(ctx)
limits, privateKey, err := endpoint.orders.CreateGetOrderLimits(ctx, bucketID, pointer)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
limits, err := endpoint.orders.CreateGetOrderLimits(ctx, uplinkIdentity, bucketID, pointer)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
return &pb.SegmentDownloadResponseOld{Pointer: pointer, AddressedLimits: limits}, nil
return &pb.SegmentDownloadResponseOld{Pointer: pointer, AddressedLimits: limits, PrivateKey: privateKey}, nil
}
return &pb.SegmentDownloadResponseOld{}, nil
@ -420,11 +409,6 @@ func (endpoint *Endpoint) DeleteSegmentOld(ctx context.Context, req *pb.SegmentD
}
if pointer.Type == pb.Pointer_REMOTE && pointer.Remote != nil {
uplinkIdentity, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
for _, piece := range pointer.GetRemote().GetRemotePieces() {
_, err := endpoint.containment.Delete(ctx, piece.NodeId)
if err != nil {
@ -433,12 +417,12 @@ func (endpoint *Endpoint) DeleteSegmentOld(ctx context.Context, req *pb.SegmentD
}
bucketID := createBucketID(keyInfo.ProjectID, req.Bucket)
limits, err := endpoint.orders.CreateDeleteOrderLimits(ctx, uplinkIdentity, bucketID, pointer)
limits, privateKey, err := endpoint.orders.CreateDeleteOrderLimits(ctx, bucketID, pointer)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
return &pb.SegmentDeleteResponseOld{AddressedLimits: limits}, nil
return &pb.SegmentDeleteResponseOld{AddressedLimits: limits, PrivateKey: privateKey}, nil
}
return &pb.SegmentDeleteResponseOld{}, nil

View File

@ -55,7 +55,7 @@ func TestInvalidAPIKey(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(client.Close)
_, _, err = client.CreateSegment(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
_, _, _, err = client.CreateSegment(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, false)
_, err = client.CommitSegment(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
@ -64,10 +64,10 @@ func TestInvalidAPIKey(t *testing.T) {
_, err = client.SegmentInfo(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.ReadSegment(ctx, "testbucket", "testpath", 0)
_, _, _, err = client.ReadSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, err = client.DeleteSegment(ctx, "testbucket", "testpath", 0)
_, _, err = client.DeleteSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.ListSegments(ctx, "testbucket", "", "", "", true, 1, 0)
@ -169,7 +169,7 @@ func TestRestrictedAPIKey(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(client.Close)
_, _, err = client.CreateSegment(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
_, _, _, err = client.CreateSegment(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, test.CreateSegmentAllowed)
_, err = client.CommitSegment(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
@ -178,16 +178,16 @@ func TestRestrictedAPIKey(t *testing.T) {
_, err = client.SegmentInfo(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.SegmentInfoAllowed)
_, _, err = client.ReadSegment(ctx, "testbucket", "testpath", 0)
_, _, _, err = client.ReadSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.ReadSegmentAllowed)
_, err = client.DeleteSegment(ctx, "testbucket", "testpath", 0)
_, _, err = client.DeleteSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.DeleteSegmentAllowed)
_, _, err = client.ListSegments(ctx, "testbucket", "testpath", "", "", true, 1, 0)
assertUnauthenticated(t, err, test.ListSegmentsAllowed)
_, _, err = client.ReadSegment(ctx, "testbucket", "", -1)
_, _, _, err = client.ReadSegment(ctx, "testbucket", "", -1)
assertUnauthenticated(t, err, test.ReadBucketAllowed)
}
}
@ -310,7 +310,7 @@ func TestCommitSegment(t *testing.T) {
ErasureShareSize: 256,
}
expirationDate := time.Now().Add(time.Hour)
addresedLimits, rootPieceID, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
addresedLimits, rootPieceID, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
require.NoError(t, err)
// create number of pieces below repair threshold
@ -441,7 +441,7 @@ func TestCreateSegment(t *testing.T) {
fail: false,
},
} {
_, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, r.rs, 1000, time.Now().Add(time.Hour))
_, _, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, r.rs, 1000, time.Now().Add(time.Hour))
if r.fail {
require.Error(t, err)
} else {
@ -484,7 +484,7 @@ func TestExpirationTimeSegment(t *testing.T) {
},
} {
_, _, err := metainfo.CreateSegment(ctx, "my-bucket-name", "file/path", -1, pointer.Remote.Redundancy, memory.MiB.Int64(), r.expirationDate)
_, _, _, err := metainfo.CreateSegment(ctx, "my-bucket-name", "file/path", -1, pointer.Remote.Redundancy, memory.MiB.Int64(), r.expirationDate)
if err != nil {
assert.True(t, r.errFlag)
} else {
@ -705,7 +705,7 @@ func TestGetProjectInfo(t *testing.T) {
func runCreateSegment(ctx context.Context, t *testing.T, metainfo *metainfo.Client) (*pb.Pointer, []*pb.OrderLimit) {
pointer := createTestPointer(t)
addressedLimits, rootPieceID, err := metainfo.CreateSegment(ctx, "my-bucket-name", "file/path", -1, pointer.Remote.Redundancy, memory.MiB.Int64(), pointer.ExpirationDate)
addressedLimits, rootPieceID, _, err := metainfo.CreateSegment(ctx, "my-bucket-name", "file/path", -1, pointer.Remote.Redundancy, memory.MiB.Int64(), pointer.ExpirationDate)
require.NoError(t, err)
pointer.Remote.RootPieceId = rootPieceID
@ -794,7 +794,7 @@ func TestBucketNameValidation(t *testing.T) {
"testbucket-63-0123456789012345678901234567890123456789012345abc",
}
for _, name := range validNames {
_, _, err = metainfo.CreateSegment(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
_, _, _, err = metainfo.CreateSegment(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
require.NoError(t, err, "bucket name: %v", name)
}
@ -808,7 +808,7 @@ func TestBucketNameValidation(t *testing.T) {
"testbucket-64-0123456789012345678901234567890123456789012345abcd",
}
for _, name := range invalidNames {
_, _, err = metainfo.CreateSegment(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
_, _, _, err = metainfo.CreateSegment(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
require.Error(t, err, "bucket name: %v", name)
}
})

View File

@ -67,7 +67,7 @@ type Endpoint struct {
}
// NewEndpoint new orders receiving endpoint
func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, certdb certdb.DB) *Endpoint {
func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, certdb certdb.DB, db DB) *Endpoint {
return &Endpoint{
log: log,
satelliteSignee: satelliteSignee,
@ -138,29 +138,34 @@ func (endpoint *Endpoint) Settlement(stream pb.Orders_SettlementServer) (err err
}
rejectErr := func() error {
var uplinkSignee signing.Signee
// who asked for this order: uplink (get/put/del) or satellite (get_repair/put_repair/audit)
if endpoint.satelliteSignee.ID() == orderLimit.UplinkId {
uplinkSignee = endpoint.satelliteSignee
} else {
uplinkPubKey, err := endpoint.certdb.GetPublicKey(ctx, orderLimit.UplinkId)
if err != nil {
log.Warn("unable to find uplink public key", zap.Error(err))
return status.Errorf(codes.Internal, "unable to find uplink public key")
}
uplinkSignee = &signing.PublicKey{
Self: orderLimit.UplinkId,
Key: uplinkPubKey,
}
}
if err := signing.VerifyOrderLimitSignature(ctx, endpoint.satelliteSignee, orderLimit); err != nil {
return Error.New("unable to verify order limit")
}
if err := signing.VerifyOrderSignature(ctx, uplinkSignee, order); err != nil {
return Error.New("unable to verify order")
if orderLimit.DeprecatedUplinkId == nil { // new signature handling
if err := signing.VerifyUplinkOrderSignature(ctx, orderLimit.UplinkPublicKey, order); err != nil {
return Error.New("unable to verify order")
}
} else {
var uplinkSignee signing.Signee
// who asked for this order: uplink (get/put/del) or satellite (get_repair/put_repair/audit)
if endpoint.satelliteSignee.ID() == *orderLimit.DeprecatedUplinkId {
uplinkSignee = endpoint.satelliteSignee
} else {
uplinkPubKey, err := endpoint.certdb.GetPublicKey(ctx, *orderLimit.DeprecatedUplinkId)
if err != nil {
log.Warn("unable to find uplink public key", zap.Error(err))
return status.Errorf(codes.Internal, "unable to find uplink public key")
}
uplinkSignee = &signing.PublicKey{
Self: *orderLimit.DeprecatedUplinkId,
Key: uplinkPubKey,
}
}
if err := signing.VerifyOrderSignature(ctx, uplinkSignee, order); err != nil {
return Error.New("unable to verify order")
}
}
// TODO should this reject or just error ??
@ -174,7 +179,7 @@ func (endpoint *Endpoint) Settlement(stream pb.Orders_SettlementServer) (err err
return nil
}()
if rejectErr != err {
log.Debug("order limit/order verification failed", zap.Stringer("serial", orderLimit.SerialNumber), zap.Error(err))
log.Debug("order limit/order verification failed", zap.Stringer("serial", orderLimit.SerialNumber), zap.Error(err), zap.Error(rejectErr))
err := monitoredSettlementStreamSend(ctx, stream, &pb.SettlementResponse{
SerialNumber: orderLimit.SerialNumber,
Status: pb.SettlementResponse_REJECTED,

View File

@ -13,9 +13,7 @@ import (
"go.uber.org/zap"
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/certdb"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
@ -31,7 +29,6 @@ type Service struct {
log *zap.Logger
satellite signing.Signer
cache *overlay.Cache
certdb certdb.DB
orders DB
satelliteAddress *pb.NodeAddress
@ -39,12 +36,11 @@ type Service struct {
}
// NewService creates new service for creating order limits.
func NewService(log *zap.Logger, satellite signing.Signer, cache *overlay.Cache, certdb certdb.DB, orders DB, orderExpiration time.Duration, satelliteAddress *pb.NodeAddress) *Service {
func NewService(log *zap.Logger, satellite signing.Signer, cache *overlay.Cache, orders DB, orderExpiration time.Duration, satelliteAddress *pb.NodeAddress) *Service {
return &Service{
log: log,
satellite: satellite,
cache: cache,
certdb: certdb,
orders: orders,
satelliteAddress: satelliteAddress,
orderExpiration: orderExpiration,
@ -116,20 +112,26 @@ func (service *Service) updateBandwidth(ctx context.Context, projectID uuid.UUID
}
// CreateGetOrderLimits creates the order limits for downloading the pieces of pointer.
func (service *Service) CreateGetOrderLimits(ctx context.Context, uplink *identity.PeerIdentity, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, err error) {
func (service *Service) CreateGetOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
rootPieceID := pointer.GetRemote().RootPieceId
pieceExpiration := pointer.ExpirationDate
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return nil, err
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
@ -160,7 +162,7 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, uplink *identi
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: uplink.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: piece.NodeId,
PieceId: rootPieceID.Derive(piece.NodeId, piece.PieceNum),
Action: pb.PieceAction_GET,
@ -170,7 +172,7 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, uplink *identi
OrderExpiration: orderExpiration,
})
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits = append(limits, &pb.AddressedOrderLimit{
@ -181,39 +183,39 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, uplink *identi
if len(limits) < redundancy.RequiredCount() {
err = Error.New("not enough nodes available: got %d, required %d", len(limits), redundancy.RequiredCount())
return nil, errs.Combine(err, combinedErrs)
}
err = service.certdb.SavePublicKey(ctx, uplink.ID, uplink.Leaf.PublicKey)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, errs.Combine(err, combinedErrs)
}
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
projectID, bucketName, err := SplitBucketID(bucketID)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, *projectID, bucketName, limits...); err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return limits, nil
return limits, piecePrivateKey, nil
}
// CreatePutOrderLimits creates the order limits for uploading pieces to nodes.
func (service *Service) CreatePutOrderLimits(ctx context.Context, uplink *identity.PeerIdentity, bucketID []byte, nodes []*pb.Node, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, err error) {
func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*pb.Node, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return storj.PieceID{}, nil, err
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
rootPieceID := storj.NewPieceID()
@ -224,7 +226,7 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, uplink *identi
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: uplink.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: node.Id,
PieceId: rootPieceID.Derive(node.Id, pieceNum),
Action: pb.PieceAction_PUT,
@ -234,7 +236,7 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, uplink *identi
OrderExpiration: orderExpiration,
})
if err != nil {
return storj.PieceID{}, nil, Error.Wrap(err)
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits[pieceNum] = &pb.AddressedOrderLimit{
@ -244,37 +246,38 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, uplink *identi
pieceNum++
}
err = service.certdb.SavePublicKey(ctx, uplink.ID, uplink.Leaf.PublicKey)
if err != nil {
return storj.PieceID{}, nil, Error.Wrap(err)
}
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return storj.PieceID{}, nil, Error.Wrap(err)
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
projectID, bucketName, err := SplitBucketID(bucketID)
if err != nil {
return rootPieceID, limits, err
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, *projectID, bucketName, limits...); err != nil {
return storj.PieceID{}, nil, Error.Wrap(err)
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return rootPieceID, limits, nil
return rootPieceID, limits, piecePrivateKey, nil
}
// CreateDeleteOrderLimits creates the order limits for deleting the pieces of pointer.
func (service *Service) CreateDeleteOrderLimits(ctx context.Context, uplink *identity.PeerIdentity, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, err error) {
func (service *Service) CreateDeleteOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
rootPieceID := pointer.GetRemote().RootPieceId
pieceExpiration := pointer.ExpirationDate
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return nil, err
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
var combinedErrs error
@ -303,7 +306,7 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, uplink *ide
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: uplink.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: piece.NodeId,
PieceId: rootPieceID.Derive(piece.NodeId, piece.PieceNum),
Action: pb.PieceAction_DELETE,
@ -313,7 +316,7 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, uplink *ide
OrderExpiration: orderExpiration,
})
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits = append(limits, &pb.AddressedOrderLimit{
@ -324,35 +327,36 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, uplink *ide
if len(limits) == 0 {
err = Error.New("failed creating order limits for all nodes")
return nil, errs.Combine(err, combinedErrs)
}
err = service.certdb.SavePublicKey(ctx, uplink.ID, uplink.Leaf.PublicKey)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, errs.Combine(err, combinedErrs)
}
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return limits, nil
return limits, piecePrivateKey, nil
}
// CreateAuditOrderLimits creates the order limits for auditing the pieces of pointer.
func (service *Service) CreateAuditOrderLimits(ctx context.Context, auditor *identity.PeerIdentity, bucketID []byte, pointer *pb.Pointer, skip map[storj.NodeID]bool) (_ []*pb.AddressedOrderLimit, err error) {
func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, skip map[storj.NodeID]bool) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
rootPieceID := pointer.GetRemote().RootPieceId
redundancy := pointer.GetRemote().GetRedundancy()
shareSize := redundancy.GetErasureShareSize()
totalPieces := redundancy.GetTotal()
pieceExpiration := pointer.ExpirationDate
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return nil, err
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
var combinedErrs error
@ -386,7 +390,7 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, auditor *ide
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: auditor.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: piece.NodeId,
PieceId: rootPieceID.Derive(piece.NodeId, piece.PieceNum),
Action: pb.PieceAction_GET_AUDIT,
@ -396,7 +400,7 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, auditor *ide
OrderExpiration: orderExpiration,
})
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits[piece.GetPieceNum()] = &pb.AddressedOrderLimit{
@ -408,55 +412,60 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, auditor *ide
if limitsCount < redundancy.GetMinReq() {
err = Error.New("not enough nodes available: got %d, required %d", limitsCount, redundancy.GetMinReq())
return nil, errs.Combine(err, combinedErrs)
return nil, storj.PiecePrivateKey{}, errs.Combine(err, combinedErrs)
}
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
projectID, bucketName, err := SplitBucketID(bucketID)
if err != nil {
return limits, err
return limits, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, *projectID, bucketName, limits...); err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return limits, nil
return limits, piecePrivateKey, nil
}
// CreateAuditOrderLimit creates an order limit for auditing a single the piece from a pointer.
func (service *Service) CreateAuditOrderLimit(ctx context.Context, auditor *identity.PeerIdentity, bucketID []byte, nodeID storj.NodeID, pieceNum int32, rootPieceID storj.PieceID, shareSize int32) (limit *pb.AddressedOrderLimit, err error) {
func (service *Service) CreateAuditOrderLimit(ctx context.Context, bucketID []byte, nodeID storj.NodeID, pieceNum int32, rootPieceID storj.PieceID, shareSize int32) (limit *pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
// TODO reduce number of params ?
defer mon.Task()(&ctx)(&err)
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return nil, err
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
node, err := service.cache.Get(ctx, nodeID)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if node.Disqualified != nil {
return nil, overlay.ErrNodeDisqualified.New(nodeID.String())
return nil, storj.PiecePrivateKey{}, overlay.ErrNodeDisqualified.New(nodeID.String())
}
if !service.cache.IsOnline(node) {
return nil, overlay.ErrNodeOffline.New(nodeID.String())
return nil, storj.PiecePrivateKey{}, overlay.ErrNodeOffline.New(nodeID.String())
}
orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: auditor.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: nodeID,
PieceId: rootPieceID.Derive(nodeID, pieceNum),
Action: pb.PieceAction_GET_AUDIT,
@ -465,7 +474,7 @@ func (service *Service) CreateAuditOrderLimit(ctx context.Context, auditor *iden
OrderExpiration: orderExpiration,
})
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limit = &pb.AddressedOrderLimit{
@ -475,36 +484,43 @@ func (service *Service) CreateAuditOrderLimit(ctx context.Context, auditor *iden
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
projectID, bucketName, err := SplitBucketID(bucketID)
if err != nil {
return limit, err
return limit, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, *projectID, bucketName, limit); err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return limit, nil
return limit, piecePrivateKey, nil
}
// CreateGetRepairOrderLimits creates the order limits for downloading the healthy pieces of pointer as the source for repair.
func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, repairer *identity.PeerIdentity, bucketID []byte, pointer *pb.Pointer, healthy []*pb.RemotePiece) (_ []*pb.AddressedOrderLimit, err error) {
func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, healthy []*pb.RemotePiece) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
rootPieceID := pointer.GetRemote().RootPieceId
redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
totalPieces := redundancy.TotalCount()
pieceExpiration := pointer.ExpirationDate
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return nil, err
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
var combinedErrs error
@ -534,7 +550,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, repairer
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: repairer.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: piece.NodeId,
PieceId: rootPieceID.Derive(piece.NodeId, piece.PieceNum),
Action: pb.PieceAction_GET_REPAIR,
@ -544,7 +560,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, repairer
OrderExpiration: orderExpiration,
})
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits[piece.GetPieceNum()] = &pb.AddressedOrderLimit{
@ -556,41 +572,48 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, repairer
if limitsCount < redundancy.RequiredCount() {
err = Error.New("not enough nodes available: got %d, required %d", limitsCount, redundancy.RequiredCount())
return nil, errs.Combine(err, combinedErrs)
return nil, storj.PiecePrivateKey{}, errs.Combine(err, combinedErrs)
}
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
projectID, bucketName, err := SplitBucketID(bucketID)
if err != nil {
return limits, err
return limits, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, *projectID, bucketName, limits...); err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return limits, nil
return limits, piecePrivateKey, nil
}
// CreatePutRepairOrderLimits creates the order limits for uploading the repaired pieces of pointer to newNodes.
func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, repairer *identity.PeerIdentity, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*pb.Node) (_ []*pb.AddressedOrderLimit, err error) {
func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*pb.Node) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
rootPieceID := pointer.GetRemote().RootPieceId
redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
totalPieces := redundancy.TotalCount()
pieceExpiration := pointer.ExpirationDate
orderExpiration := time.Now().Add(service.orderExpiration)
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
serialNumber, err := service.createSerial(ctx)
if err != nil {
return nil, err
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits := make([]*pb.AddressedOrderLimit, totalPieces)
@ -601,14 +624,14 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, repairer
}
if int(pieceNum) >= totalPieces { // should not happen
return nil, Error.New("piece num greater than total pieces: %d >= %d", pieceNum, totalPieces)
return nil, storj.PiecePrivateKey{}, Error.New("piece num greater than total pieces: %d >= %d", pieceNum, totalPieces)
}
orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: service.satellite.ID(),
SatelliteAddress: service.satelliteAddress,
UplinkId: repairer.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: node.Id,
PieceId: rootPieceID.Derive(node.Id, pieceNum),
Action: pb.PieceAction_PUT_REPAIR,
@ -618,7 +641,7 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, repairer
OrderExpiration: orderExpiration,
})
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
limits[pieceNum] = &pb.AddressedOrderLimit{
@ -630,18 +653,18 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, repairer
err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
if err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
projectID, bucketName, err := SplitBucketID(bucketID)
if err != nil {
return limits, err
return limits, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, *projectID, bucketName, limits...); err != nil {
return nil, Error.Wrap(err)
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
return limits, nil
return limits, piecePrivateKey, nil
}
// UpdateGetInlineOrder updates amount of inline GET bandwidth for given bucket

View File

@ -377,14 +377,13 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config, ve
peer.Orders.Endpoint = orders.NewEndpoint(
peer.Log.Named("orders:endpoint"),
satelliteSignee,
peer.DB.Orders(),
peer.DB.CertDB(),
peer.DB.Orders(),
)
peer.Orders.Service = orders.NewService(
peer.Log.Named("orders:service"),
signing.SignerFromFullIdentity(peer.Identity),
peer.Overlay.Service,
peer.DB.CertDB(),
peer.DB.Orders(),
config.Orders.Expiration,
&pb.NodeAddress{

View File

@ -32,7 +32,6 @@ func TestOrders(t *testing.T) {
satellite0 := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())
uplink := testidentity.MustPregeneratedSignedIdentity(3, storj.LatestIDVersion())
piece := storj.NewPieceID()
serialNumber := testrand.SerialNumber()
@ -48,10 +47,13 @@ func TestOrders(t *testing.T) {
now := time.Now()
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
limit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite0), &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satellite0.ID,
UplinkId: uplink.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID,
PieceId: piece,
Limit: 100,
@ -62,7 +64,7 @@ func TestOrders(t *testing.T) {
})
require.NoError(t, err)
order, err := signing.SignOrder(ctx, signing.SignerFromFullIdentity(uplink), &pb.Order{
order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
SerialNumber: serialNumber,
Amount: 50,
})

View File

@ -56,6 +56,7 @@ func TestPieceInfo(t *testing.T) {
PieceCreation: now,
PieceExpiration: now,
OrderLimit: &pb.OrderLimit{},
UplinkPieceHash: piecehash0,
}
@ -75,6 +76,7 @@ func TestPieceInfo(t *testing.T) {
PieceCreation: now,
PieceExpiration: now,
OrderLimit: &pb.OrderLimit{},
UplinkPieceHash: piecehash1,
}
@ -98,6 +100,7 @@ func TestPieceInfo(t *testing.T) {
PieceCreation: now2,
PieceExpiration: now2,
OrderLimit: &pb.OrderLimit{},
UplinkPieceHash: piecehash2,
}

View File

@ -13,7 +13,6 @@ import (
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/memory"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/storage"
@ -41,8 +40,8 @@ type Info struct {
PieceCreation time.Time
PieceExpiration time.Time
OrderLimit *pb.OrderLimit
UplinkPieceHash *pb.PieceHash
Uplink *identity.PeerIdentity
}
// ExpiredInfo is a fully namespaced piece id

View File

@ -207,11 +207,6 @@ func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error)
}
}()
peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return Error.Wrap(err)
}
pieceWriter, err = endpoint.store.Writer(ctx, limit.SatelliteId, limit.PieceId)
if err != nil {
return ErrInternal.Wrap(err) // TODO: report grpc status internal server error
@ -234,7 +229,7 @@ func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error)
}
largestOrder := pb.Order{}
defer endpoint.SaveOrder(ctx, limit, &largestOrder, peer)
defer endpoint.SaveOrder(ctx, limit, &largestOrder)
for {
message, err = stream.Recv() // TODO: reuse messages to avoid allocations
@ -251,7 +246,7 @@ func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error)
}
if message.Order != nil {
if err := endpoint.VerifyOrder(ctx, peer, limit, message.Order, largestOrder.Amount); err != nil {
if err := endpoint.VerifyOrder(ctx, limit, message.Order, largestOrder.Amount); err != nil {
return err
}
largestOrder = *message.Order
@ -284,7 +279,7 @@ func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error)
if message.Done != nil {
expectedHash := pieceWriter.Hash()
if err := endpoint.VerifyPieceHash(ctx, peer, limit, message.Done, expectedHash); err != nil {
if err := endpoint.VerifyPieceHash(ctx, limit, message.Done, expectedHash); err != nil {
return err // TODO: report grpc status internal server error
}
@ -303,8 +298,8 @@ func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error)
PieceCreation: limit.OrderCreation,
PieceExpiration: limit.PieceExpiration,
OrderLimit: limit,
UplinkPieceHash: message.Done,
Uplink: peer,
}
if err := endpoint.pieceinfo.Add(ctx, info); err != nil {
@ -399,11 +394,6 @@ func (endpoint *Endpoint) Download(stream pb.Piecestore_DownloadServer) (err err
}
}()
peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return Error.Wrap(err)
}
pieceReader, err = endpoint.store.Reader(ctx, limit.SatelliteId, limit.PieceId)
if err != nil {
if os.IsNotExist(err) {
@ -480,7 +470,7 @@ func (endpoint *Endpoint) Download(stream pb.Piecestore_DownloadServer) (err err
recvErr := func() (err error) {
largestOrder := pb.Order{}
defer endpoint.SaveOrder(ctx, limit, &largestOrder, peer)
defer endpoint.SaveOrder(ctx, limit, &largestOrder)
// ensure that we always terminate sending goroutine
defer throttle.Fail(io.EOF)
@ -498,7 +488,7 @@ func (endpoint *Endpoint) Download(stream pb.Piecestore_DownloadServer) (err err
return ErrProtocol.New("expected order as the message")
}
if err := endpoint.VerifyOrder(ctx, peer, limit, message.Order, largestOrder.Amount); err != nil {
if err := endpoint.VerifyOrder(ctx, limit, message.Order, largestOrder.Amount); err != nil {
return err
}
@ -522,7 +512,7 @@ func (endpoint *Endpoint) Download(stream pb.Piecestore_DownloadServer) (err err
}
// SaveOrder saves the order with all necessary information. It assumes it has been already verified.
func (endpoint *Endpoint) SaveOrder(ctx context.Context, limit *pb.OrderLimit, order *pb.Order, uplink *identity.PeerIdentity) {
func (endpoint *Endpoint) SaveOrder(ctx context.Context, limit *pb.OrderLimit, order *pb.Order) {
var err error
defer mon.Task()(&ctx)(&err)

View File

@ -146,10 +146,9 @@ func TestUpload(t *testing.T) {
expectedHash := pkcrypto.SHA256Hash(data)
serialNumber := testrand.SerialNumber()
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
@ -162,7 +161,7 @@ func TestUpload(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
_, err = uploader.Write(data)
@ -201,10 +200,9 @@ func TestDownload(t *testing.T) {
expectedData := testrand.Bytes(10 * memory.KiB)
serialNumber := testrand.SerialNumber()
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_PUT,
@ -217,7 +215,7 @@ func TestDownload(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
_, err = uploader.Write(expectedData)
@ -248,10 +246,9 @@ func TestDownload(t *testing.T) {
} {
serialNumber := testrand.SerialNumber()
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
@ -264,7 +261,7 @@ func TestDownload(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
downloader, err := client.Download(ctx, orderLimit, 0, int64(len(expectedData)))
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, int64(len(expectedData)))
require.NoError(t, err)
buffer := make([]byte, len(expectedData))
@ -304,10 +301,9 @@ func TestDelete(t *testing.T) {
expectedData := testrand.Bytes(10 * memory.KiB)
serialNumber := testrand.SerialNumber()
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_PUT,
@ -320,7 +316,7 @@ func TestDelete(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
_, err = uploader.Write(expectedData)
@ -352,10 +348,9 @@ func TestDelete(t *testing.T) {
} {
serialNumber := testrand.SerialNumber()
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
@ -368,7 +363,7 @@ func TestDelete(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
err := client.Delete(ctx, orderLimit)
err := client.Delete(ctx, orderLimit, piecePrivateKey)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
@ -413,11 +408,10 @@ func TestTooManyRequests(t *testing.T) {
i, uplink := i, uplink
uploads.Go(func() (err error) {
storageNode := planet.StorageNodes[0].Local()
signer := signing.SignerFromFullIdentity(uplink.Transport.Identity())
config := piecestore.DefaultConfig
config.UploadBufferSize = 0 // disable buffering so we can detect write error early
client, err := piecestore.Dial(ctx, uplink.Transport, &storageNode.Node, uplink.Log, signer, config)
client, err := piecestore.Dial(ctx, uplink.Transport, &storageNode.Node, uplink.Log, config)
if err != nil {
return err
}
@ -431,10 +425,9 @@ func TestTooManyRequests(t *testing.T) {
pieceID := storj.PieceID{byte(i + 1)}
serialNumber := testrand.SerialNumber()
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
uplink.ID(),
planet.StorageNodes[0].ID(),
pieceID,
pb.PieceAction_PUT,
@ -450,7 +443,7 @@ func TestTooManyRequests(t *testing.T) {
return err
}
upload, err := client.Upload(ctx, orderLimit)
upload, err := client.Upload(ctx, orderLimit, piecePrivateKey)
if err != nil {
if errs2.IsRPC(err, codes.Unavailable) {
if atomic.AddInt64(&failedCount, -1) == 0 {
@ -491,14 +484,14 @@ func TestTooManyRequests(t *testing.T) {
}
}
func GenerateOrderLimit(t *testing.T, satellite storj.NodeID, uplink storj.NodeID, storageNode storj.NodeID, pieceID storj.PieceID,
action pb.PieceAction, serialNumber storj.SerialNumber, pieceExpiration, orderExpiration time.Duration, limit int64) *pb.OrderLimit {
func GenerateOrderLimit(t *testing.T, satellite storj.NodeID, storageNode storj.NodeID, pieceID storj.PieceID, action pb.PieceAction, serialNumber storj.SerialNumber, pieceExpiration, orderExpiration time.Duration, limit int64) (*pb.OrderLimit, storj.PiecePrivateKey) {
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
now := time.Now()
return &pb.OrderLimit{
SatelliteId: satellite,
UplinkId: uplink,
UplinkPublicKey: piecePublicKey,
StorageNodeId: storageNode,
PieceId: pieceID,
Action: action,
@ -507,7 +500,7 @@ func GenerateOrderLimit(t *testing.T, satellite storj.NodeID, uplink storj.NodeI
OrderExpiration: now.Add(orderExpiration),
PieceExpiration: now.Add(pieceExpiration),
Limit: limit,
}
}, piecePrivateKey
}
func TestRetain(t *testing.T) {
@ -580,7 +573,7 @@ func TestRetain(t *testing.T) {
PieceID: id,
PieceCreation: pieceCreation,
UplinkPieceHash: piecehash0,
Uplink: uplink.PeerIdentity(),
OrderLimit: &pb.OrderLimit{},
}
pieceinfo1 := pieces.Info{
SatelliteID: satellite1.ID,
@ -588,7 +581,7 @@ func TestRetain(t *testing.T) {
PieceID: id,
PieceCreation: pieceCreation,
UplinkPieceHash: piecehash1,
Uplink: uplink.PeerIdentity(),
OrderLimit: &pb.OrderLimit{},
}
err = pieceInfos.Add(ctx, &pieceinfo0)

View File

@ -12,7 +12,6 @@ import (
"storj.io/storj/internal/errs2"
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
)
@ -45,28 +44,17 @@ func (endpoint *Endpoint) VerifyOrderLimit(ctx context.Context, limit *pb.OrderL
return ErrProtocol.New("order created too long ago: %v", limit.OrderCreation)
case limit.SatelliteId.IsZero():
return ErrProtocol.New("missing satellite id")
case limit.UplinkId.IsZero():
return ErrProtocol.New("missing uplink id")
case limit.UplinkPublicKey.IsZero():
return ErrProtocol.New("missing uplink public key")
case len(limit.SatelliteSignature) == 0:
return ErrProtocol.New("missing satellite signature")
case limit.PieceId.IsZero():
return ErrProtocol.New("missing piece id")
}
// either uplink or satellite can only make the request
// TODO: should this check be based on the action?
// with macaroons we might not have either of them doing the action
peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil || limit.UplinkId != peer.ID && limit.SatelliteId != peer.ID {
return ErrVerifyNotAuthorized.New("uplink:%s satellite:%s sender %s", limit.UplinkId, limit.SatelliteId, peer.ID)
}
if err := endpoint.trust.VerifySatelliteID(ctx, limit.SatelliteId); err != nil {
return ErrVerifyUntrusted.Wrap(err)
}
if err := endpoint.trust.VerifyUplinkID(ctx, limit.UplinkId); err != nil {
return ErrVerifyUntrusted.Wrap(err)
}
if err := endpoint.VerifyOrderLimitSignature(ctx, limit); err != nil {
if err == context.Canceled {
@ -90,7 +78,7 @@ func (endpoint *Endpoint) VerifyOrderLimit(ctx context.Context, limit *pb.OrderL
}
// VerifyOrder verifies that the order corresponds to the order limit and has all the necessary fields.
func (endpoint *Endpoint) VerifyOrder(ctx context.Context, peer *identity.PeerIdentity, limit *pb.OrderLimit, order *pb.Order, largestOrderAmount int64) (err error) {
func (endpoint *Endpoint) VerifyOrder(ctx context.Context, limit *pb.OrderLimit, order *pb.Order, largestOrderAmount int64) (err error) {
defer mon.Task()(&ctx)(&err)
if order.SerialNumber != limit.SerialNumber {
@ -104,18 +92,18 @@ func (endpoint *Endpoint) VerifyOrder(ctx context.Context, peer *identity.PeerId
return ErrProtocol.New("order exceeded allowed amount=%v, limit=%v", order.Amount, limit.Limit) // TODO: report grpc status bad message
}
if err := signing.VerifyOrderSignature(ctx, signing.SigneeFromPeerIdentity(peer), order); err != nil {
return ErrVerifyUntrusted.New("invalid order signature") // TODO: report grpc status bad message
if err := signing.VerifyUplinkOrderSignature(ctx, limit.UplinkPublicKey, order); err != nil {
return ErrVerifyUntrusted.Wrap(err)
}
return nil
}
// VerifyPieceHash verifies whether the piece hash is properly signed and matches the locally computed hash.
func (endpoint *Endpoint) VerifyPieceHash(ctx context.Context, peer *identity.PeerIdentity, limit *pb.OrderLimit, hash *pb.PieceHash, expectedHash []byte) (err error) {
func (endpoint *Endpoint) VerifyPieceHash(ctx context.Context, limit *pb.OrderLimit, hash *pb.PieceHash, expectedHash []byte) (err error) {
defer mon.Task()(&ctx)(&err)
if peer == nil || limit == nil || hash == nil || len(expectedHash) == 0 {
if limit == nil || hash == nil || len(expectedHash) == 0 {
return ErrProtocol.New("invalid arguments")
}
if limit.PieceId != hash.PieceId {
@ -125,8 +113,8 @@ func (endpoint *Endpoint) VerifyPieceHash(ctx context.Context, peer *identity.Pe
return ErrProtocol.New("hashes don't match") // TODO: report grpc status bad message
}
if err := signing.VerifyPieceHashSignature(ctx, signing.SigneeFromPeerIdentity(peer), hash); err != nil {
return ErrVerifyUntrusted.New("invalid hash signature: %v", err) // TODO: report grpc status bad message
if err := signing.VerifyUplinkPieceHashSignature(ctx, limit.UplinkPublicKey, hash); err != nil {
return ErrVerifyUntrusted.New("invalid piece hash signature") // TODO: report grpc status bad message
}
return nil

View File

@ -140,10 +140,9 @@ func TestOrderLimitPutValidation(t *testing.T) {
satellite = unapprovedSatellite
}
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
@ -156,7 +155,7 @@ func TestOrderLimitPutValidation(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
var writeErr error
@ -205,10 +204,9 @@ func TestOrderLimitGetValidation(t *testing.T) {
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_PUT,
@ -221,7 +219,7 @@ func TestOrderLimitGetValidation(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
data := testrand.Bytes(defaultPieceSize)
@ -263,10 +261,9 @@ func TestOrderLimitGetValidation(t *testing.T) {
satellite = tt.satellite
}
orderLimit := GenerateOrderLimit(
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.Uplinks[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
@ -279,7 +276,7 @@ func TestOrderLimitGetValidation(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
downloader, err := client.Download(ctx, orderLimit, 0, tt.limit)
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, tt.limit)
require.NoError(t, err)
var readErr error
@ -329,7 +326,7 @@ func setSpace(ctx context.Context, t *testing.T, planet *testplanet.Planet, spac
PieceSize: diff,
PieceCreation: now,
PieceExpiration: time.Time{},
Uplink: planet.Uplinks[0].Identity.PeerIdentity(),
OrderLimit: &pb.OrderLimit{},
UplinkPieceHash: &pb.PieceHash{},
})
require.NoError(t, err)

View File

@ -252,6 +252,13 @@ func (db *InfoDB) Migration() *migrate.Migration {
`DROP INDEX idx_pieceinfo_deletion_failed`,
},
},
{
Description: "Add order limit table.",
Version: 9,
Action: migrate.SQL{
`ALTER TABLE pieceinfo ADD COLUMN order_limit BLOB NOT NULL DEFAULT X''`,
},
},
},
}
}

View File

@ -38,6 +38,7 @@ func (db *ordersdb) Enqueue(ctx context.Context, info *orders.Info) (err error)
return ErrInfo.Wrap(err)
}
// TODO: remove uplink_cert_id
_, err = db.db.Exec(`
INSERT INTO unsent_order(
satellite_id, serial_number,

View File

@ -39,6 +39,11 @@ func (db *InfoDB) PieceInfo() pieces.DB { return &db.pieceinfo }
func (db *pieceinfo) Add(ctx context.Context, info *pieces.Info) (err error) {
defer mon.Task()(&ctx)(&err)
orderLimit, err := proto.Marshal(info.OrderLimit)
if err != nil {
return ErrInfo.Wrap(err)
}
uplinkPieceHash, err := proto.Marshal(info.UplinkPieceHash)
if err != nil {
return ErrInfo.Wrap(err)
@ -47,9 +52,9 @@ func (db *pieceinfo) Add(ctx context.Context, info *pieces.Info) (err error) {
// TODO remove `uplink_cert_id` from DB
_, err = db.db.ExecContext(ctx, db.Rebind(`
INSERT INTO
pieceinfo(satellite_id, piece_id, piece_size, piece_creation, piece_expiration, uplink_piece_hash, uplink_cert_id)
VALUES (?,?,?,?,?,?,?)
`), info.SatelliteID, info.PieceID, info.PieceSize, info.PieceCreation, info.PieceExpiration, uplinkPieceHash, 0)
pieceinfo(satellite_id, piece_id, piece_size, piece_creation, piece_expiration, order_limit, uplink_piece_hash, uplink_cert_id)
VALUES (?,?,?,?,?,?,?,?)
`), info.SatelliteID, info.PieceID, info.PieceSize, info.PieceCreation, info.PieceExpiration, orderLimit, uplinkPieceHash, 0)
if err == nil {
db.loadSpaceUsed(ctx)
@ -91,13 +96,20 @@ func (db *pieceinfo) Get(ctx context.Context, satelliteID storj.NodeID, pieceID
info.SatelliteID = satelliteID
info.PieceID = pieceID
var orderLimit []byte
var uplinkPieceHash []byte
err = db.db.QueryRowContext(ctx, db.Rebind(`
SELECT piece_size, piece_creation, piece_expiration, uplink_piece_hash
SELECT piece_size, piece_creation, piece_expiration, order_limit, uplink_piece_hash
FROM pieceinfo
WHERE satellite_id = ? AND piece_id = ?
`), satelliteID, pieceID).Scan(&info.PieceSize, &info.PieceCreation, &info.PieceExpiration, &uplinkPieceHash)
`), satelliteID, pieceID).Scan(&info.PieceSize, &info.PieceCreation, &info.PieceExpiration, &orderLimit, &uplinkPieceHash)
if err != nil {
return nil, ErrInfo.Wrap(err)
}
info.OrderLimit = &pb.OrderLimit{}
err = proto.Unmarshal(orderLimit, info.OrderLimit)
if err != nil {
return nil, ErrInfo.Wrap(err)
}

View File

@ -129,22 +129,22 @@ func createOrders(t *testing.T, ctx *testcontext.Context, orders map[string]orde
}
return nil
}
func createOrder(t *testing.T, ctx *testcontext.Context) (info *orders.Info) {
storageNodeIdentity := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
satelliteIdentity := testidentity.MustPregeneratedSignedIdentity(1, storj.LatestIDVersion())
uplink := testidentity.MustPregeneratedSignedIdentity(3, storj.LatestIDVersion())
piece := storj.NewPieceID()
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
piece := testrand.PieceID()
serialNumber := testrand.SerialNumber()
expiration := time.Now()
limit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satelliteIdentity), &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satelliteIdentity.ID,
UplinkId: uplink.ID,
UplinkPublicKey: piecePublicKey,
StorageNodeId: storageNodeIdentity.ID,
PieceId: piece,
Limit: 100,
@ -154,7 +154,7 @@ func createOrder(t *testing.T, ctx *testcontext.Context) (info *orders.Info) {
})
require.NoError(t, err)
order, err := signing.SignOrder(ctx, signing.SignerFromFullIdentity(uplink), &pb.Order{
order, err := signing.SignUplinkOrder(ctx, piecePrivateKey, &pb.Order{
SerialNumber: serialNumber,
Amount: 50,
})

View File

@ -0,0 +1,128 @@
-- table for keeping serials that need to be verified against
CREATE TABLE used_serial (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
expiration TIMESTAMP NOT NULL
);
-- primary key on satellite id and serial number
CREATE UNIQUE INDEX pk_used_serial ON used_serial(satellite_id, serial_number);
-- expiration index to allow fast deletion
CREATE INDEX idx_used_serial ON used_serial(expiration);
-- certificate table for storing uplink/satellite certificates
CREATE TABLE certificate (
cert_id INTEGER
);
-- table for storing piece meta info
CREATE TABLE pieceinfo (
satellite_id BLOB NOT NULL,
piece_id BLOB NOT NULL,
piece_size BIGINT NOT NULL,
piece_expiration TIMESTAMP,
order_limit BLOB NOT NULL,
uplink_piece_hash BLOB NOT NULL,
uplink_cert_id INTEGER NOT NULL,
deletion_failed_at TIMESTAMP,
piece_creation TIMESTAMP NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
-- primary key by satellite id and piece id
CREATE UNIQUE INDEX pk_pieceinfo ON pieceinfo(satellite_id, piece_id);
-- table for storing bandwidth usage
CREATE TABLE bandwidth_usage (
satellite_id BLOB NOT NULL,
action INTEGER NOT NULL,
amount BIGINT NOT NULL,
created_at TIMESTAMP NOT NULL
);
CREATE INDEX idx_bandwidth_usage_satellite ON bandwidth_usage(satellite_id);
CREATE INDEX idx_bandwidth_usage_created ON bandwidth_usage(created_at);
-- table for storing all unsent orders
CREATE TABLE unsent_order (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
order_limit_serialized BLOB NOT NULL,
order_serialized BLOB NOT NULL,
order_limit_expiration TIMESTAMP NOT NULL,
uplink_cert_id INTEGER NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
CREATE UNIQUE INDEX idx_orders ON unsent_order(satellite_id, serial_number);
-- table for storing all sent orders
CREATE TABLE order_archive (
satellite_id BLOB NOT NULL,
serial_number BLOB NOT NULL,
order_limit_serialized BLOB NOT NULL,
order_serialized BLOB NOT NULL,
uplink_cert_id INTEGER NOT NULL,
status INTEGER NOT NULL,
archived_at TIMESTAMP NOT NULL,
FOREIGN KEY(uplink_cert_id) REFERENCES certificate(cert_id)
);
CREATE INDEX idx_order_archive_satellite ON order_archive(satellite_id);
CREATE INDEX idx_order_archive_status ON order_archive(status);
-- table for storing vouchers
CREATE TABLE vouchers (
satellite_id BLOB PRIMARY KEY NOT NULL,
voucher_serialized BLOB NOT NULL,
expiration TIMESTAMP NOT NULL
);
INSERT INTO unsent_order VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'1eddef484b4c03f01332279032796972',X'0a101eddef484b4c03f0133227903279697212202b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf410001a201968996e7ef170a402fdfd88b6753df792c063c07c555905ffac9cd3cbd1c00022200ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac30002a20d00cf14f3c68b56321ace04902dec0484eb6f9098b22b31c6b3f82db249f191630643802420c08dfeb88e50510a8c1a5b9034a0c08dfeb88e50510a8c1a5b9035246304402204df59dc6f5d1bb7217105efbc9b3604d19189af37a81efbf16258e5d7db5549e02203bb4ead16e6e7f10f658558c22b59c3339911841e8dbaae6e2dea821f7326894',X'0a101eddef484b4c03f0133227903279697210321a47304502206d4c106ddec88140414bac5979c95bdea7de2e0ecc5be766e08f7d5ea36641a7022100e932ff858f15885ffa52d07e260c2c25d3861810ea6157956c1793ad0c906284','2019-04-01 16:01:35.9254586+00:00',1);
INSERT INTO pieceinfo VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b',123,'2019-05-09 00:00:00.000000+00:00', X'', X'0a20d5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b120501020304051a47304502201c16d76ecd9b208f7ad9f1edf66ce73dce50da6bde6bbd7d278415099a727421022100ca730450e7f6506c2647516f6e20d0641e47c8270f58dde2bb07d1f5a3a45673',1,NULL,'epoch');
INSERT INTO pieceinfo VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b',123,'2019-05-09 00:00:00.000000+00:00', X'', X'0a20d5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b120501020304051a483046022100e623cf4705046e2c04d5b42d5edbecb81f000459713ad460c691b3361817adbf022100993da2a5298bb88de6c35b2e54009d1bf306cda5d441c228aa9eaf981ceb0f3d',2,NULL,'epoch');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',0,0,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',0,0,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',1,1,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',2,2,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',2,2,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',3,3,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',3,3,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',4,4,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',4,4,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',5,5,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',5,5,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',6,6,'2019-04-01 18:51:24.1074772+03:00');
INSERT INTO bandwidth_usage VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',6,6,'2019-04-01 20:51:24.1074772+03:00');
INSERT INTO order_archive VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000',X'62180593328b8ff3c9f97565fdfd305d',X'0a1062180593328b8ff3c9f97565fdfd305d12202b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf410001a201968996e7ef170a402fdfd88b6753df792c063c07c555905ffac9cd3cbd1c00022200ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac30002a2077003db64dfd50c5bdc84daf28bcef97f140d302c3e5bfd002bcc7ac04e1273430643802420c08fce688e50510a0ffe7ff014a0c08fce688e50510a0ffe7ff0152473045022100943d90068a1b1e6879b16a6ed8cdf0237005de09f61cddab884933fefd9692bf0220417a74f2e59523d962e800a1b06618f0113039d584e28aae37737e4a71555966',X'0a1062180593328b8ff3c9f97565fdfd305d10321a47304502200f4d97f03ad2d87501f68bfcf0525ec518aebf817cf56aa5eeaea53d01b153a102210096e60cf4b594837b43b5c841d283e4b72c9a09207d64bdd4665c700dc2e0a4a2',1,1,'2019-04-01 18:51:24.5374893+03:00');
INSERT INTO vouchers VALUES(X'2b3a5863a41f25408a8f5348839d7a1361dbd886d75786bb139a8ca0bdf41000', X'd5e757fd8d207d1c46583fb58330f803dc961b71147308ff75ff1e72a0df6b0b', '2019-07-04 00:00:00.000000+00:00');
CREATE INDEX idx_pieceinfo_expiration ON pieceinfo(piece_expiration);
INSERT INTO used_serial VALUES(X'0693a8529105f5ff763e30b6f58ead3fe7a4f93f32b4b298073c01b2b39fa76e',X'18283dd3cec0a5abf6112e903549bdff','2019-04-01 18:58:53.3169599+03:00');
INSERT INTO used_serial VALUES(X'976a6bbcfcec9d96d847f8642c377d5f23c118187fb0ca21e9e1c5a9fbafa5f7',X'18283dd3cec0a5abf6112e903549bdff','2019-04-01 18:58:53.3169599+03:00');
-- NEW DATA --

View File

@ -83,13 +83,6 @@ func (pool *Pool) VerifySatelliteID(ctx context.Context, id storj.NodeID) (err e
return nil
}
// VerifyUplinkID verifides whether id corresponds to a trusted uplink.
func (pool *Pool) VerifyUplinkID(ctx context.Context, id storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)
// trusting all the uplinks for now
return nil
}
// GetSignee gets the corresponding signee for verifying signatures.
// It ignores passed in ctx cancellation to avoid miscaching between concurrent requests.
func (pool *Pool) GetSignee(ctx context.Context, id storj.NodeID) (_ signing.Signee, err error) {

View File

@ -75,7 +75,7 @@ func (client *Client) Close() error {
}
// CreateSegment requests the order limits for creating a new segment
func (client *Client) CreateSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, err error) {
func (client *Client) CreateSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.CreateSegmentOld(ctx, &pb.SegmentWriteRequestOld{
@ -87,10 +87,10 @@ func (client *Client) CreateSegment(ctx context.Context, bucket string, path sto
Expiration: expiration,
})
if err != nil {
return nil, rootPieceID, Error.Wrap(err)
return nil, rootPieceID, piecePrivateKey, Error.Wrap(err)
}
return response.GetAddressedLimits(), response.RootPieceId, nil
return response.GetAddressedLimits(), response.RootPieceId, response.PrivateKey, nil
}
// CommitSegment requests to store the pointer for the segment
@ -131,7 +131,7 @@ func (client *Client) SegmentInfo(ctx context.Context, bucket string, path storj
}
// ReadSegment requests the order limits for reading a segment
func (client *Client) ReadSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, err error) {
func (client *Client) ReadSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.DownloadSegmentOld(ctx, &pb.SegmentDownloadRequestOld{
@ -141,12 +141,12 @@ func (client *Client) ReadSegment(ctx context.Context, bucket string, path storj
})
if err != nil {
if status.Code(err) == codes.NotFound {
return nil, nil, storage.ErrKeyNotFound.Wrap(err)
return nil, nil, piecePrivateKey, storage.ErrKeyNotFound.Wrap(err)
}
return nil, nil, Error.Wrap(err)
return nil, nil, piecePrivateKey, Error.Wrap(err)
}
return response.GetPointer(), sortLimits(response.GetAddressedLimits(), response.GetPointer()), nil
return response.GetPointer(), sortLimits(response.GetAddressedLimits(), response.GetPointer()), response.PrivateKey, nil
}
// sortLimits sorts order limits and fill missing ones with nil values
@ -168,7 +168,7 @@ func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID sto
}
// DeleteSegment requests the order limits for deleting a segment
func (client *Client) DeleteSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (limits []*pb.AddressedOrderLimit, err error) {
func (client *Client) DeleteSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.DeleteSegmentOld(ctx, &pb.SegmentDeleteRequestOld{
@ -178,12 +178,12 @@ func (client *Client) DeleteSegment(ctx context.Context, bucket string, path sto
})
if err != nil {
if status.Code(err) == codes.NotFound {
return nil, storage.ErrKeyNotFound.Wrap(err)
return nil, piecePrivateKey, storage.ErrKeyNotFound.Wrap(err)
}
return nil, Error.Wrap(err)
return nil, piecePrivateKey, Error.Wrap(err)
}
return response.GetAddressedLimits(), nil
return response.GetAddressedLimits(), response.PrivateKey, nil
}
// ListSegments lists the available segments

View File

@ -12,8 +12,8 @@ import (
"google.golang.org/grpc"
"storj.io/storj/internal/memory"
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
)
@ -41,14 +41,13 @@ var DefaultConfig = Config{
// Client implements uploading, downloading and deleting content from a piecestore.
type Client struct {
log *zap.Logger
signer signing.Signer
client pb.PiecestoreClient
conn *grpc.ClientConn
config Config
}
// Dial dials the target piecestore endpoint.
func Dial(ctx context.Context, transport transport.Client, target *pb.Node, log *zap.Logger, signer signing.Signer, config Config) (*Client, error) {
func Dial(ctx context.Context, transport transport.Client, target *pb.Node, log *zap.Logger, config Config) (*Client, error) {
conn, err := transport.DialNode(ctx, target)
if err != nil {
return nil, Error.Wrap(err)
@ -56,7 +55,6 @@ func Dial(ctx context.Context, transport transport.Client, target *pb.Node, log
return &Client{
log: log,
signer: signer,
client: pb.NewPiecestoreClient(conn),
conn: conn,
config: config,
@ -64,7 +62,7 @@ func Dial(ctx context.Context, transport transport.Client, target *pb.Node, log
}
// Delete uses delete order limit to delete a piece on piece store.
func (client *Client) Delete(ctx context.Context, limit *pb.OrderLimit) (err error) {
func (client *Client) Delete(ctx context.Context, limit *pb.OrderLimit, privateKey storj.PiecePrivateKey) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.Delete(ctx, &pb.PieceDeleteRequest{
Limit: limit,

View File

@ -13,6 +13,7 @@ import (
"storj.io/storj/pkg/auth/signing"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
// Downloader is interface that can be used for downloading content.
@ -24,11 +25,12 @@ type Downloader interface {
// Download implements downloading from a piecestore.
type Download struct {
client *Client
limit *pb.OrderLimit
peer *identity.PeerIdentity
stream pb.Piecestore_DownloadClient
ctx context.Context
client *Client
limit *pb.OrderLimit
privateKey storj.PiecePrivateKey
peer *identity.PeerIdentity
stream pb.Piecestore_DownloadClient
ctx context.Context
read int64 // how much data we have read so far
allocated int64 // how far have we sent orders
@ -42,7 +44,7 @@ type Download struct {
}
// Download starts a new download using the specified order limit at the specified offset and size.
func (client *Client) Download(ctx context.Context, limit *pb.OrderLimit, offset, size int64) (_ Downloader, err error) {
func (client *Client) Download(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey, offset, size int64) (_ Downloader, err error) {
defer mon.Task()(&ctx)(&err)
stream, err := client.client.Download(ctx)
@ -70,11 +72,12 @@ func (client *Client) Download(ctx context.Context, limit *pb.OrderLimit, offset
}
download := &Download{
client: client,
limit: limit,
peer: peer,
stream: stream,
ctx: ctx,
client: client,
limit: limit,
privateKey: piecePrivateKey,
peer: peer,
stream: stream,
ctx: ctx,
read: 0,
@ -125,12 +128,10 @@ func (client *Download) Read(data []byte) (read int, err error) {
// send an order
if newAllocation > 0 {
// sign the order
order, err := signing.SignOrder(ctx, client.client.signer, &pb.Order{
order, err := signing.SignUplinkOrder(ctx, client.privateKey, &pb.Order{
SerialNumber: client.limit.SerialNumber,
Amount: newAllocation,
})
// something went wrong with signing
if err != nil {
client.unread.IncludeError(err)
return read, nil

View File

@ -15,6 +15,7 @@ import (
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/pkcrypto"
"storj.io/storj/pkg/storj"
)
var mon = monkit.Package()
@ -31,11 +32,12 @@ type Uploader interface {
// Upload implements uploading to the storage node.
type Upload struct {
client *Client
limit *pb.OrderLimit
peer *identity.PeerIdentity
stream pb.Piecestore_UploadClient
ctx context.Context
client *Client
limit *pb.OrderLimit
privateKey storj.PiecePrivateKey
peer *identity.PeerIdentity
stream pb.Piecestore_UploadClient
ctx context.Context
hash hash.Hash // TODO: use concrete implementation
offset int64
@ -47,7 +49,7 @@ type Upload struct {
}
// Upload initiates an upload to the storage node.
func (client *Client) Upload(ctx context.Context, limit *pb.OrderLimit) (_ Uploader, err error) {
func (client *Client) Upload(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey) (_ Uploader, err error) {
defer mon.Task()(&ctx, "node: "+limit.StorageNodeId.String()[0:8])(&err)
stream, err := client.client.Upload(ctx)
@ -69,11 +71,12 @@ func (client *Client) Upload(ctx context.Context, limit *pb.OrderLimit) (_ Uploa
}
upload := &Upload{
client: client,
limit: limit,
peer: peer,
stream: stream,
ctx: ctx,
client: client,
limit: limit,
privateKey: piecePrivateKey,
peer: peer,
stream: stream,
ctx: ctx,
hash: pkcrypto.NewHash(),
offset: 0,
@ -118,7 +121,7 @@ func (client *Upload) Write(data []byte) (written int, err error) {
}
// create a signed order for the next chunk
order, err := signing.SignOrder(ctx, client.client.signer, &pb.Order{
order, err := signing.SignUplinkOrder(ctx, client.privateKey, &pb.Order{
SerialNumber: client.limit.SerialNumber,
Amount: client.offset + int64(len(sendData)),
})
@ -177,7 +180,7 @@ func (client *Upload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) {
}
// sign the hash for storage node
uplinkHash, err := signing.SignPieceHash(ctx, client.client.signer, &pb.PieceHash{
uplinkHash, err := signing.SignUplinkPieceHash(ctx, client.privateKey, &pb.PieceHash{
PieceId: client.limit.PieceId,
Hash: client.hash.Sum(nil),
})