BWA aliases (#1333)

Aliased RBAs (RenterBandwidthAllocation, now Order) and PBAs (PayerBandwidthAllocation, now OrderLimit).
Bill Thorp 2019-02-22 16:17:35 -05:00 committed by GitHub
parent b5447c6608
commit 373b301736
35 changed files with 131 additions and 123 deletions
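
The mechanics of the rename are two Go type aliases added to the pb package (see the pb diff below); both names refer to the same generated protobuf types, so call sites can be migrated incrementally without changing the wire format:

type OrderLimit = PayerBandwidthAllocation
type Order = RenterBandwidthAllocation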

View File

@ -69,11 +69,11 @@ func sendGeneratedAgreements(ctx context.Context, t *testing.T, db satellite.DB,
agreements := make([]*psdb.Agreement, len(actions))
for i, action := range actions {
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(action, satID, upID, time.Hour)
pba, err := testbwagreement.GenerateOrderLimit(action, satID, upID, time.Hour)
require.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pba.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, snID.ID, upID, 1000)
rba, err := testbwagreement.GenerateOrder(pba, snID.ID, upID, 1000)
require.NoError(t, err)
agreements[i] = &psdb.Agreement{Agreement: *rba}
}

View File

@ -23,7 +23,7 @@ import (
type Stripe struct {
Index int
Segment *pb.Pointer
PBA *pb.PayerBandwidthAllocation
PBA *pb.OrderLimit
}
// Cursor keeps track of audit location in pointer db

View File

@ -35,7 +35,7 @@ type Verifier struct {
}
type downloader interface {
DownloadShares(ctx context.Context, pointer *pb.Pointer, stripeIndex int, pba *pb.PayerBandwidthAllocation) (shares map[int]Share, nodes map[int]storj.NodeID, err error)
DownloadShares(ctx context.Context, pointer *pb.Pointer, stripeIndex int, pba *pb.OrderLimit) (shares map[int]Share, nodes map[int]storj.NodeID, err error)
}
// defaultDownloader downloads shares from networked storage nodes
@ -58,7 +58,7 @@ func NewVerifier(transport transport.Client, overlay *overlay.Cache, id *identit
// getShare uses piece store clients to download shares from a given node
func (d *defaultDownloader) getShare(ctx context.Context, stripeIndex, shareSize, pieceNumber int,
id psclient.PieceID, pieceSize int64, fromNode *pb.Node, pba *pb.PayerBandwidthAllocation) (s Share, err error) {
id psclient.PieceID, pieceSize int64, fromNode *pb.Node, pba *pb.OrderLimit) (s Share, err error) {
// TODO: too many arguments use a struct
defer mon.Task()(&ctx)(&err)
@ -106,7 +106,7 @@ func (d *defaultDownloader) getShare(ctx context.Context, stripeIndex, shareSize
// DownloadShares downloads shares from the nodes where remote pieces are located
func (d *defaultDownloader) DownloadShares(ctx context.Context, pointer *pb.Pointer,
stripeIndex int, pba *pb.PayerBandwidthAllocation) (shares map[int]Share, nodes map[int]storj.NodeID, err error) {
stripeIndex int, pba *pb.OrderLimit) (shares map[int]Share, nodes map[int]storj.NodeID, err error) {
defer mon.Task()(&ctx)(&err)
var nodeIds storj.NodeIDList

View File

@ -205,7 +205,7 @@ func TestCalcPadded(t *testing.T) {
}
func (m *mockDownloader) DownloadShares(ctx context.Context, pointer *pb.Pointer, stripeIndex int,
pba *pb.PayerBandwidthAllocation) (shares map[int]share, nodes map[int]storj.NodeID, err error) {
pba *pb.OrderLimit) (shares map[int]share, nodes map[int]storj.NodeID, err error) {
nodes = make(map[int]*pb.Node, 30)

View File

@ -39,8 +39,8 @@ func TestBandwidthDBAgreement(t *testing.T) {
func testCreateAgreement(ctx context.Context, t *testing.T, b bwagreement.DB, action pb.BandwidthAction,
serialNum string, upID, snID *identity.FullIdentity) error {
rba := &pb.RenterBandwidthAllocation{
PayerAllocation: pb.PayerBandwidthAllocation{
rba := &pb.Order{
PayerAllocation: pb.OrderLimit{
Action: action,
SerialNumber: serialNum,
UplinkId: upID.ID,

View File

@ -45,7 +45,7 @@ type UplinkStat struct {
// DB stores bandwidth agreements.
type DB interface {
// CreateAgreement adds a new bandwidth agreement.
CreateAgreement(context.Context, *pb.RenterBandwidthAllocation) error
CreateAgreement(context.Context, *pb.Order) error
// GetTotalsSince returns the sum of each bandwidth type after (excluding) a given date range
GetTotals(context.Context, time.Time, time.Time) (map[storj.NodeID][]int64, error)
//GetTotals returns stats about an uplink
@ -71,7 +71,7 @@ func NewServer(db DB, upldb certdb.DB, pkey crypto.PublicKey, logger *zap.Logger
func (s *Server) Close() error { return nil }
// BandwidthAgreements receives and stores bandwidth agreements from storage nodes
func (s *Server) BandwidthAgreements(ctx context.Context, rba *pb.RenterBandwidthAllocation) (reply *pb.AgreementsSummary, err error) {
func (s *Server) BandwidthAgreements(ctx context.Context, rba *pb.Order) (reply *pb.AgreementsSummary, err error) {
defer mon.Task()(&ctx)(&err)
s.logger.Debug("Received Agreement...")
reply = &pb.AgreementsSummary{
@ -110,13 +110,13 @@ func (s *Server) BandwidthAgreements(ctx context.Context, rba *pb.RenterBandwidt
return reply, nil
}
func (s *Server) verifySignature(ctx context.Context, rba *pb.RenterBandwidthAllocation) error {
func (s *Server) verifySignature(ctx context.Context, rba *pb.Order) error {
pba := rba.GetPayerAllocation()
// Get renter's public key from uplink agreement db
uplinkInfo, err := s.certdb.GetPublicKey(ctx, pba.UplinkId)
if err != nil {
return pb.ErrRenter.Wrap(auth.ErrVerify.New("Failed to unmarshal PayerBandwidthAllocation: %+v", err))
return pb.ErrRenter.Wrap(auth.ErrVerify.New("Failed to unmarshal OrderLimit: %+v", err))
}
// verify Renter's (uplink) signature

View File

@ -67,23 +67,23 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
satellite := bwagreement.NewServer(db.BandwidthAgreement(), db.CertDB(), satID.Leaf.PublicKey, zap.NewNop(), satID.ID)
{ // TestSameSerialNumberBandwidthAgreements
pbaFile1, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, time.Hour)
pbaFile1, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, time.Hour)
assert.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pbaFile1.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
ctxSN1, storageNode1 := getPeerContext(ctx, t)
rbaNode1, err := testbwagreement.GenerateRenterBandwidthAllocation(pbaFile1, storageNode1, upID, 666)
rbaNode1, err := testbwagreement.GenerateOrder(pbaFile1, storageNode1, upID, 666)
assert.NoError(t, err)
ctxSN2, storageNode2 := getPeerContext(ctx, t)
rbaNode2, err := testbwagreement.GenerateRenterBandwidthAllocation(pbaFile1, storageNode2, upID, 666)
rbaNode2, err := testbwagreement.GenerateOrder(pbaFile1, storageNode2, upID, 666)
assert.NoError(t, err)
/* More than one storage node can submit bwagreements with the same serial number.
Uplink would like to download a file from 2 storage nodes.
Uplink requests a PayerBandwidthAllocation from the satellite. One serial number for all storage nodes.
Uplink signes 2 RenterBandwidthAllocation for both storage node. */
Uplink requests an OrderLimit from the satellite. One serial number for all storage nodes.
Uplink signs 2 Orders, one for each storage node. */
{
reply, err := satellite.BandwidthAgreements(ctxSN1, rbaNode1)
assert.NoError(t, err)
@ -95,14 +95,14 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
}
/* Storage node can submit a second bwagreement with a different sequence value.
Uplink downloads another file. New PayerBandwidthAllocation with a new sequence. */
Uplink downloads another file. New OrderLimit with a new sequence. */
{
pbaFile2, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, time.Hour)
pbaFile2, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, time.Hour)
assert.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pbaFile2.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
rbaNode1, err := testbwagreement.GenerateRenterBandwidthAllocation(pbaFile2, storageNode1, upID, 666)
rbaNode1, err := testbwagreement.GenerateOrder(pbaFile2, storageNode1, upID, 666)
assert.NoError(t, err)
reply, err := satellite.BandwidthAgreements(ctxSN1, rbaNode1)
@ -112,7 +112,7 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
/* Storage nodes can't submit a second bwagreement with the same sequence. */
{
rbaNode1, err := testbwagreement.GenerateRenterBandwidthAllocation(pbaFile1, storageNode1, upID, 666)
rbaNode1, err := testbwagreement.GenerateOrder(pbaFile1, storageNode1, upID, 666)
assert.NoError(t, err)
reply, err := satellite.BandwidthAgreements(ctxSN1, rbaNode1)
@ -132,13 +132,13 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
{ // TestExpiredBandwidthAgreements
{ // storage nodes can submit a bwagreement that will expire in 30 seconds
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, 30*time.Second)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, 30*time.Second)
assert.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pba.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
ctxSN1, storageNode1 := getPeerContext(ctx, t)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, storageNode1, upID, 666)
rba, err := testbwagreement.GenerateOrder(pba, storageNode1, upID, 666)
assert.NoError(t, err)
reply, err := satellite.BandwidthAgreements(ctxSN1, rba)
@ -147,13 +147,13 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
}
{ // storage nodes can't submit a bwagreement that expires right now
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, 0*time.Second)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, 0*time.Second)
assert.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pba.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
ctxSN1, storageNode1 := getPeerContext(ctx, t)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, storageNode1, upID, 666)
rba, err := testbwagreement.GenerateOrder(pba, storageNode1, upID, 666)
assert.NoError(t, err)
reply, err := satellite.BandwidthAgreements(ctxSN1, rba)
@ -162,13 +162,13 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
}
{ // storage nodes can't submit a bwagreement that expires yesterday
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, -23*time.Hour-55*time.Second)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, -23*time.Hour-55*time.Second)
assert.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pba.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
ctxSN1, storageNode1 := getPeerContext(ctx, t)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, storageNode1, upID, 666)
rba, err := testbwagreement.GenerateOrder(pba, storageNode1, upID, 666)
assert.NoError(t, err)
reply, err := satellite.BandwidthAgreements(ctxSN1, rba)
@ -178,7 +178,7 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
}
{ // TestManipulatedBandwidthAgreements
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, time.Hour)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, time.Hour)
if !assert.NoError(t, err) {
t.Fatal(err)
}
@ -186,7 +186,7 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
assert.NoError(t, err)
ctxSN1, storageNode1 := getPeerContext(ctx, t)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, storageNode1, upID, 666)
rba, err := testbwagreement.GenerateOrder(pba, storageNode1, upID, 666)
assert.NoError(t, err)
// Storage node manipulates the bwagreement
@ -244,7 +244,7 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
assert.Equal(t, pb.AgreementsSummary_REJECTED, reply.Status)
}
/* Storage node can't self sign the PayerBandwidthAllocation.
/* Storage node can't self sign the OrderLimit.
Satellite will verify the Payer's Signature. */
{
manipRBA := *rba
@ -296,13 +296,13 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
{ //TestInvalidBandwidthAgreements
ctxSN1, storageNode1 := getPeerContext(ctx, t)
ctxSN2, storageNode2 := getPeerContext(ctx, t)
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, satID, upID, time.Hour)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, satID, upID, time.Hour)
assert.NoError(t, err)
err = db.CertDB().SavePublicKey(ctx, pba.UplinkId, upID.Leaf.PublicKey)
assert.NoError(t, err)
{ // Storage node sends a corrupted signature to force a satellite crash
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, storageNode1, upID, 666)
rba, err := testbwagreement.GenerateOrder(pba, storageNode1, upID, 666)
assert.NoError(t, err)
rba.Signature = []byte("invalid")
reply, err := satellite.BandwidthAgreements(ctxSN1, rba)
@ -312,7 +312,7 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
}
{ // Storage node sends corrupted uplink Certs to force a crash
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, storageNode2, upID, 666)
rba, err := testbwagreement.GenerateOrder(pba, storageNode2, upID, 666)
assert.NoError(t, err)
rba.PayerAllocation.Certs = nil
reply, err := callBWA(ctxSN2, t, satellite, rba.GetSignature(), rba, rba.GetCerts())
@ -322,7 +322,7 @@ func testDatabase(ctx context.Context, t *testing.T, db satellite.DB) {
}
}
func callBWA(ctx context.Context, t *testing.T, sat *bwagreement.Server, signature []byte, rba *pb.RenterBandwidthAllocation, certs [][]byte) (*pb.AgreementsSummary, error) {
func callBWA(ctx context.Context, t *testing.T, sat *bwagreement.Server, signature []byte, rba *pb.Order, certs [][]byte) (*pb.AgreementsSummary, error) {
rba.SetCerts(certs)
rba.SetSignature(signature)
return sat.BandwidthAgreements(ctx, rba)

View File

@ -14,13 +14,13 @@ import (
"storj.io/storj/pkg/storj"
)
//GeneratePayerBandwidthAllocation creates a signed PayerBandwidthAllocation from a BandwidthAction
func GeneratePayerBandwidthAllocation(action pb.BandwidthAction, satID *identity.FullIdentity, upID *identity.FullIdentity, expiration time.Duration) (*pb.PayerBandwidthAllocation, error) {
//GenerateOrderLimit creates a signed OrderLimit from a BandwidthAction
func GenerateOrderLimit(action pb.BandwidthAction, satID *identity.FullIdentity, upID *identity.FullIdentity, expiration time.Duration) (*pb.OrderLimit, error) {
serialNum, err := uuid.New()
if err != nil {
return nil, err
}
pba := &pb.PayerBandwidthAllocation{
pba := &pb.OrderLimit{
SatelliteId: satID.ID,
UplinkId: upID.ID,
ExpirationUnixSec: time.Now().Add(expiration).Unix(),
@ -32,13 +32,13 @@ func GeneratePayerBandwidthAllocation(action pb.BandwidthAction, satID *identity
return pba, auth.SignMessage(pba, *satID)
}
//GenerateRenterBandwidthAllocation creates a signed RenterBandwidthAllocation from a PayerBandwidthAllocation
func GenerateRenterBandwidthAllocation(pba *pb.PayerBandwidthAllocation, storageNodeID storj.NodeID, upID *identity.FullIdentity, total int64) (*pb.RenterBandwidthAllocation, error) {
rba := &pb.RenterBandwidthAllocation{
//GenerateOrder creates a signed Order from an OrderLimit
func GenerateOrder(pba *pb.OrderLimit, storageNodeID storj.NodeID, upID *identity.FullIdentity, total int64) (*pb.Order, error) {
rba := &pb.Order{
PayerAllocation: *pba,
StorageNodeId: storageNodeID,
Total: total,
}
// Combine Signature and Data for RenterBandwidthAllocation
// Combine Signature and Data for Order
return rba, auth.SignMessage(rba, *upID)
}

View File

@ -11,6 +11,12 @@ import (
"github.com/zeebo/errs"
)
//OrderLimit aliases PayerBandwidthAllocation
type OrderLimit = PayerBandwidthAllocation
//Order aliases RenterBandwidthAllocation
type Order = RenterBandwidthAllocation
var (
//ErrRenter wraps errors related to renter bandwidth allocations
ErrRenter = errs.Class("Renter agreement")
@ -40,28 +46,28 @@ func Equal(msg1, msg2 proto.Message) bool {
}
//SetCerts updates the certs field, completing the auth.SignedMsg interface
func (m *PayerBandwidthAllocation) SetCerts(certs [][]byte) {
func (m *OrderLimit) SetCerts(certs [][]byte) {
m.Certs = certs
}
//SetSignature updates the signature field, completing the auth.SignedMsg interface
func (m *PayerBandwidthAllocation) SetSignature(signature []byte) {
func (m *OrderLimit) SetSignature(signature []byte) {
m.Signature = signature
}
//SetCerts updates the certs field, completing the auth.SignedMsg interface
func (m *RenterBandwidthAllocation) SetCerts(certs [][]byte) {
func (m *Order) SetCerts(certs [][]byte) {
m.Certs = certs
}
//SetSignature updates the signature field, completing the auth.SignedMsg interface
func (m *RenterBandwidthAllocation) SetSignature(signature []byte) {
func (m *Order) SetSignature(signature []byte) {
m.Signature = signature
}
// Clone creates a deep copy of PayerBandwidthAllocation
func (m *PayerBandwidthAllocation) Clone() (pba PayerBandwidthAllocation) {
pba = PayerBandwidthAllocation{
func (m *OrderLimit) Clone() (pba OrderLimit) {
pba = OrderLimit{
SatelliteId: m.SatelliteId,
UplinkId: m.UplinkId,
MaxSize: m.MaxSize,
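
The aliases above make OrderLimit/PayerBandwidthAllocation and Order/RenterBandwidthAllocation interchangeable everywhere. As a minimal, self-contained illustration of the pattern (the struct and field names here are stand-ins, not the real storj/pb definitions):

package main

import "fmt"

// payerBandwidthAllocation stands in for the legacy generated struct.
type payerBandwidthAllocation struct {
	SerialNumber string
	MaxSize      int64
}

// orderLimit is an alias, not a distinct type: both names describe the
// same type, so no conversion is needed in either direction.
type orderLimit = payerBandwidthAllocation

// describe is written against the new name.
func describe(limit *orderLimit) string {
	return fmt.Sprintf("serial=%s max=%d", limit.SerialNumber, limit.MaxSize)
}

func main() {
	// Callers still using the old name compile and run unchanged.
	legacy := &payerBandwidthAllocation{SerialNumber: "abc123", MaxSize: 1024}
	fmt.Println(describe(legacy))
}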

View File

@ -43,8 +43,8 @@ func init() {
// Client is an interface describing the functions for interacting with piecestore nodes
type Client interface {
Meta(ctx context.Context, id PieceID) (*pb.PieceSummary, error)
Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, ba *pb.PayerBandwidthAllocation) error
Get(ctx context.Context, id PieceID, size int64, ba *pb.PayerBandwidthAllocation) (ranger.Ranger, error)
Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) error
Get(ctx context.Context, id PieceID, size int64, ba *pb.OrderLimit) (ranger.Ranger, error)
Delete(ctx context.Context, pieceID PieceID, satelliteID storj.NodeID) error
io.Closer
}
@ -117,7 +117,7 @@ func (ps *PieceStore) Meta(ctx context.Context, id PieceID) (*pb.PieceSummary, e
}
// Put uploads a Piece to a piece store Server
func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, pba *pb.PayerBandwidthAllocation) error {
func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl time.Time, pba *pb.OrderLimit) error {
stream, err := ps.client.Store(ctx)
if err != nil {
return err
@ -128,7 +128,7 @@ func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl t
// of this instance at the same time.
pbaClone := pba.Clone()
rba := &pb.RenterBandwidthAllocation{
rba := &pb.Order{
PayerAllocation: pbaClone,
StorageNodeId: ps.remoteID,
}
@ -164,7 +164,7 @@ func (ps *PieceStore) Put(ctx context.Context, id PieceID, data io.Reader, ttl t
}
// Get begins downloading a Piece from a piece store Server
func (ps *PieceStore) Get(ctx context.Context, id PieceID, size int64, ba *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
func (ps *PieceStore) Get(ctx context.Context, id PieceID, size int64, ba *pb.OrderLimit) (ranger.Ranger, error) {
stream, err := ps.client.Retrieve(ctx)
if err != nil {
return nil, err
@ -184,6 +184,6 @@ func (ps *PieceStore) Delete(ctx context.Context, id PieceID, satelliteID storj.
}
// sign a message using the client's private key
func (ps *PieceStore) sign(rba *pb.RenterBandwidthAllocation) (err error) {
func (ps *PieceStore) sign(rba *pb.Order) (err error) {
return auth.SignMessage(rba, *ps.selfID)
}

View File

@ -23,11 +23,11 @@ type pieceRanger struct {
id PieceID
size int64
stream pb.PieceStoreRoutes_RetrieveClient
pba *pb.PayerBandwidthAllocation
pba *pb.OrderLimit
}
// PieceRanger returns a Ranger from a PieceID.
func PieceRanger(ctx context.Context, c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, pba *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
func PieceRanger(ctx context.Context, c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, pba *pb.OrderLimit) (ranger.Ranger, error) {
piece, err := c.Meta(ctx, id)
if err != nil {
return nil, err
@ -38,7 +38,7 @@ func PieceRanger(ctx context.Context, c *PieceStore, stream pb.PieceStoreRoutes_
// PieceRangerSize creates a PieceRanger with known size.
// Use it if you know the piece size. This will save the extra request for
// retrieving the piece size from the piece storage.
func PieceRangerSize(c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, size int64, pba *pb.PayerBandwidthAllocation) ranger.Ranger {
func PieceRangerSize(c *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, size int64, pba *pb.OrderLimit) ranger.Ranger {
return &pieceRanger{c: c, id: id, size: size, stream: stream, pba: pba}
}
@ -67,7 +67,7 @@ func (r *pieceRanger) Range(ctx context.Context, offset, length int64) (io.ReadC
// of this instance at the same time.
pbaClone := r.pba.Clone()
rba := &pb.RenterBandwidthAllocation{
rba := &pb.Order{
PayerAllocation: pbaClone,
StorageNodeId: r.c.remoteID,
}

View File

@ -80,7 +80,7 @@ func TestPieceRanger(t *testing.T) {
target.Type.DPanicOnInvalid("pr test")
c, err := NewCustomRoute(route, target, 32*1024, id)
assert.NoError(t, err)
rr, err := PieceRanger(ctx, c, stream, pid, &pb.PayerBandwidthAllocation{})
rr, err := PieceRanger(ctx, c, stream, pid, &pb.OrderLimit{})
if assert.NoError(t, err, errTag) {
assert.Equal(t, tt.size, rr.Size(), errTag)
}
@ -156,7 +156,7 @@ func TestPieceRangerSize(t *testing.T) {
target.Type.DPanicOnInvalid("pr test 2")
c, err := NewCustomRoute(route, target, 32*1024, id)
assert.NoError(t, err)
rr := PieceRangerSize(c, stream, pid, tt.size, &pb.PayerBandwidthAllocation{})
rr := PieceRangerSize(c, stream, pid, tt.size, &pb.OrderLimit{})
assert.Equal(t, tt.size, rr.Size(), errTag)
r, err := rr.Range(ctx, tt.offset, tt.length)
if tt.errString != "" {

View File

@ -19,7 +19,7 @@ type StreamWriter struct {
stream pb.PieceStoreRoutes_StoreClient
signer *PieceStore // We need this for signing
totalWritten int64
rba *pb.RenterBandwidthAllocation
rba *pb.Order
}
// Write Piece data to a piece store server upload stream
@ -68,7 +68,7 @@ type StreamReader struct {
}
// NewStreamReader creates a StreamReader for reading data from the piece store server
func NewStreamReader(client *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, rba *pb.RenterBandwidthAllocation, size int64) *StreamReader {
func NewStreamReader(client *PieceStore, stream pb.PieceStoreRoutes_RetrieveClient, rba *pb.Order, size int64) *StreamReader {
sr := &StreamReader{
pendingAllocs: sync2.NewThrottle(),
client: client,

View File

@ -38,7 +38,7 @@ type DB struct {
// Agreement is a struct that contains a bandwidth agreement and the associated signature
type Agreement struct {
Agreement pb.RenterBandwidthAllocation
Agreement pb.Order
Signature []byte
}
@ -233,7 +233,7 @@ func (db *DB) DeleteExpired(ctx context.Context) (expired []string, err error) {
}
// WriteBandwidthAllocToDB inserts bandwidth agreement into DB
func (db *DB) WriteBandwidthAllocToDB(rba *pb.RenterBandwidthAllocation) error {
func (db *DB) WriteBandwidthAllocToDB(rba *pb.Order) error {
rbaBytes, err := proto.Marshal(rba)
if err != nil {
return err
@ -265,7 +265,7 @@ func (db *DB) DeleteBandwidthAllocationBySignature(signature []byte) error {
}
// GetBandwidthAllocationBySignature finds allocation info by signature
func (db *DB) GetBandwidthAllocationBySignature(signature []byte) ([]*pb.RenterBandwidthAllocation, error) {
func (db *DB) GetBandwidthAllocationBySignature(signature []byte) ([]*pb.Order, error) {
defer db.locked()()
rows, err := db.DB.Query(`SELECT agreement FROM bandwidth_agreements WHERE signature = ?`, signature)
@ -278,14 +278,14 @@ func (db *DB) GetBandwidthAllocationBySignature(signature []byte) ([]*pb.RenterB
}
}()
agreements := []*pb.RenterBandwidthAllocation{}
agreements := []*pb.Order{}
for rows.Next() {
var rbaBytes []byte
err := rows.Scan(&rbaBytes)
if err != nil {
return agreements, err
}
rba := &pb.RenterBandwidthAllocation{}
rba := &pb.Order{}
err = proto.Unmarshal(rbaBytes, rba)
if err != nil {
return agreements, err

View File

@ -130,9 +130,9 @@ func TestHappyPath(t *testing.T) {
}
})
bandwidthAllocation := func(signature string, satelliteID storj.NodeID, total int64) *pb.RenterBandwidthAllocation {
return &pb.RenterBandwidthAllocation{
PayerAllocation: pb.PayerBandwidthAllocation{SatelliteId: satelliteID},
bandwidthAllocation := func(signature string, satelliteID storj.NodeID, total int64) *pb.Order {
return &pb.Order{
PayerAllocation: pb.OrderLimit{SatelliteId: satelliteID},
Total: total,
Signature: []byte(signature),
}
@ -140,7 +140,7 @@ func TestHappyPath(t *testing.T) {
//TODO: use better data
nodeIDAB := teststorj.NodeIDFromString("AB")
allocationTests := []*pb.RenterBandwidthAllocation{
allocationTests := []*pb.Order{
bandwidthAllocation("signed by test", nodeIDAB, 0),
bandwidthAllocation("signed by sigma", nodeIDAB, 10),
bandwidthAllocation("signed by sigma", nodeIDAB, 98),
@ -260,8 +260,8 @@ func BenchmarkWriteBandwidthAllocation(b *testing.B) {
b.RunParallel(func(b *testing.PB) {
for b.Next() {
for i := 0; i < WritesPerLoop; i++ {
_ = db.WriteBandwidthAllocToDB(&pb.RenterBandwidthAllocation{
PayerAllocation: pb.PayerBandwidthAllocation{},
_ = db.WriteBandwidthAllocToDB(&pb.Order{
PayerAllocation: pb.OrderLimit{},
Total: 156,
Signature: []byte("signed by test"),
})

View File

@ -37,7 +37,7 @@ func (s *StreamWriter) Write(b []byte) (int, error) {
// StreamReader is a struct for Retrieving data from server
type StreamReader struct {
src *utils.ReaderSource
bandwidthAllocation *pb.RenterBandwidthAllocation
bandwidthAllocation *pb.Order
currentTotal int64
bandwidthRemaining int64
spaceRemaining int64

View File

@ -42,12 +42,12 @@ func (s *Server) Retrieve(stream pb.PieceStoreRoutes_RetrieveServer) (err error)
rba := recv.GetBandwidthAllocation()
if rba == nil {
return RetrieveError.New("RenterBandwidthAllocation message is nil")
return RetrieveError.New("Order message is nil")
}
pba := rba.PayerAllocation
if pb.Equal(&pba, &pb.PayerBandwidthAllocation{}) {
return RetrieveError.New("PayerBandwidthAllocation message is empty")
if pb.Equal(&pba, &pb.OrderLimit{}) {
return RetrieveError.New("OrderLimit message is empty")
}
id, err := getNamespacedPieceID([]byte(pd.GetId()), pba.SatelliteId.Bytes())
@ -114,7 +114,7 @@ func (s *Server) retrieveData(ctx context.Context, stream pb.PieceStoreRoutes_Re
// Bandwidth Allocation recv loop
go func() {
var lastTotal int64
var lastAllocation *pb.RenterBandwidthAllocation
var lastAllocation *pb.Order
defer func() {
if lastAllocation == nil {
return

View File

@ -268,7 +268,7 @@ func (s *Server) deleteByID(id string) error {
return nil
}
func (s *Server) verifySignature(ctx context.Context, rba *pb.RenterBandwidthAllocation) error {
func (s *Server) verifySignature(ctx context.Context, rba *pb.Order) error {
// TODO(security): detect replay attacks
pba := rba.PayerAllocation
//verify message content
@ -304,7 +304,7 @@ func (s *Server) verifySignature(ctx context.Context, rba *pb.RenterBandwidthAll
return nil
}
func (s *Server) verifyPayerAllocation(pba *pb.PayerBandwidthAllocation, actionPrefix string) (err error) {
func (s *Server) verifyPayerAllocation(pba *pb.OrderLimit, actionPrefix string) (err error) {
switch {
case pba.SatelliteId.IsZero():
return StoreError.New("payer bandwidth allocation: missing satellite id")

View File

@ -234,7 +234,7 @@ func TestRetrieve(t *testing.T) {
err = stream.Send(&pb.PieceRetrieval{PieceData: &pb.PieceRetrieval_PieceData{Id: tt.id, PieceSize: tt.reqSize, Offset: tt.offset}})
require.NoError(t, err)
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_GET, snID, upID, time.Hour)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_GET, snID, upID, time.Hour)
require.NoError(t, err)
totalAllocated := int64(0)
@ -245,7 +245,7 @@ func TestRetrieve(t *testing.T) {
// Send bandwidth allocation
totalAllocated += tt.allocSize
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, snID.ID, upID, totalAllocated)
rba, err := testbwagreement.GenerateOrder(pba, snID.ID, upID, totalAllocated)
require.NoError(t, err)
err = stream.Send(&pb.PieceRetrieval{BandwidthAllocation: rba})
@ -325,9 +325,9 @@ func TestStore(t *testing.T) {
require.NoError(t, err)
// Create Bandwidth Allocation Data
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(pb.BandwidthAction_PUT, snID, upID, time.Hour)
pba, err := testbwagreement.GenerateOrderLimit(pb.BandwidthAction_PUT, snID, upID, time.Hour)
require.NoError(t, err)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, snID.ID, upID, tt.totalReceived)
rba, err := testbwagreement.GenerateOrder(pba, snID.ID, upID, tt.totalReceived)
require.NoError(t, err)
// Write the buffer to the stream we opened earlier
@ -368,7 +368,7 @@ func TestStore(t *testing.T) {
var agreement, signature []byte
err = rows.Scan(&agreement, &signature)
require.NoError(t, err)
rba := &pb.RenterBandwidthAllocation{}
rba := &pb.Order{}
require.NoError(t, proto.Unmarshal(agreement, rba))
require.Equal(t, msg.BandwidthAllocation.GetSignature(), signature)
require.True(t, pb.Equal(pba, &rba.PayerAllocation))
@ -437,9 +437,9 @@ func TestPbaValidation(t *testing.T) {
// Create Bandwidth Allocation Data
content := []byte("content")
pba, err := testbwagreement.GeneratePayerBandwidthAllocation(tt.action, satID1, upID, time.Hour)
pba, err := testbwagreement.GenerateOrderLimit(tt.action, satID1, upID, time.Hour)
require.NoError(t, err)
rba, err := testbwagreement.GenerateRenterBandwidthAllocation(pba, snID.ID, upID, int64(len(content)))
rba, err := testbwagreement.GenerateOrder(pba, snID.ID, upID, int64(len(content)))
require.NoError(t, err)
msg := &pb.PieceStore{
PieceData: &pb.PieceStore_PieceData{Content: content},

View File

@ -48,12 +48,12 @@ func (s *Server) Store(reqStream pb.PieceStoreRoutes_StoreServer) (err error) {
rba := recv.GetBandwidthAllocation()
if rba == nil {
return StoreError.New("RenterBandwidthAllocation message is nil")
return StoreError.New("Order message is nil")
}
pba := rba.PayerAllocation
if pb.Equal(&pba, &pb.PayerBandwidthAllocation{}) {
return StoreError.New("PayerBandwidthAllocation message is empty")
if pb.Equal(&pba, &pb.OrderLimit{}) {
return StoreError.New("OrderLimit message is empty")
}
id, err := getNamespacedPieceID([]byte(pd.GetId()), pba.SatelliteId.Bytes())

View File

@ -34,7 +34,7 @@ func NewAllocationSigner(satelliteIdentity *identity.FullIdentity, bwExpiration
}
// PayerBandwidthAllocation returns generated payer bandwidth allocation
func (allocation *AllocationSigner) PayerBandwidthAllocation(ctx context.Context, peerIdentity *identity.PeerIdentity, action pb.BandwidthAction) (pba *pb.PayerBandwidthAllocation, err error) {
func (allocation *AllocationSigner) PayerBandwidthAllocation(ctx context.Context, peerIdentity *identity.PeerIdentity, action pb.BandwidthAction) (pba *pb.OrderLimit, err error) {
if peerIdentity == nil {
return nil, Error.New("missing peer identity")
}
@ -57,7 +57,7 @@ func (allocation *AllocationSigner) PayerBandwidthAllocation(ctx context.Context
return nil, err
}
pba = &pb.PayerBandwidthAllocation{
pba = &pb.OrderLimit{
SatelliteId: allocation.satelliteIdentity.ID,
UplinkId: peerIdentity.ID,
CreatedUnixSec: created,

View File

@ -45,11 +45,11 @@ type ListItem struct {
// Client services offered for the interface
type Client interface {
Put(ctx context.Context, path storj.Path, pointer *pb.Pointer) error
Get(ctx context.Context, path storj.Path) (*pb.Pointer, []*pb.Node, *pb.PayerBandwidthAllocation, error)
Get(ctx context.Context, path storj.Path) (*pb.Pointer, []*pb.Node, *pb.OrderLimit, error)
List(ctx context.Context, prefix, startAfter, endBefore storj.Path, recursive bool, limit int, metaFlags uint32) (items []ListItem, more bool, err error)
Delete(ctx context.Context, path storj.Path) error
PayerBandwidthAllocation(context.Context, pb.BandwidthAction) (*pb.PayerBandwidthAllocation, error)
PayerBandwidthAllocation(context.Context, pb.BandwidthAction) (*pb.OrderLimit, error)
// Disconnect() error // TODO: implement
}
@ -87,7 +87,7 @@ func (pdb *PointerDB) Put(ctx context.Context, path storj.Path, pointer *pb.Poin
}
// Get is the interface to make a GET request, needs PATH and APIKey
func (pdb *PointerDB) Get(ctx context.Context, path storj.Path) (pointer *pb.Pointer, nodes []*pb.Node, pba *pb.PayerBandwidthAllocation, err error) {
func (pdb *PointerDB) Get(ctx context.Context, path storj.Path) (pointer *pb.Pointer, nodes []*pb.Node, pba *pb.OrderLimit, err error) {
defer mon.Task()(&ctx)(&err)
res, err := pdb.client.Get(ctx, &pb.GetRequest{Path: path})
@ -159,7 +159,7 @@ func (pdb *PointerDB) Delete(ctx context.Context, path storj.Path) (err error) {
}
// PayerBandwidthAllocation gets payer bandwidth allocation message
func (pdb *PointerDB) PayerBandwidthAllocation(ctx context.Context, action pb.BandwidthAction) (resp *pb.PayerBandwidthAllocation, err error) {
func (pdb *PointerDB) PayerBandwidthAllocation(ctx context.Context, action pb.BandwidthAction) (resp *pb.OrderLimit, err error) {
defer mon.Task()(&ctx)(&err)
response, err := pdb.client.PayerBandwidthAllocation(ctx, &pb.PayerBandwidthAllocationRequest{Action: action})

View File

@ -146,7 +146,7 @@ func TestGet(t *testing.T) {
err = proto.Unmarshal(byteData, ptr)
assert.NoError(t, err)
getResponse := pb.GetResponse{Pointer: ptr, Nodes: []*pb.Node{}, Pba: &pb.PayerBandwidthAllocation{}}
getResponse := pb.GetResponse{Pointer: ptr, Nodes: []*pb.Node{}, Pba: &pb.OrderLimit{}}
errTag := fmt.Sprintf("Test case #%d", i)

View File

@ -6,8 +6,10 @@ package mock_pointerdb
import (
context "context"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
pb "storj.io/storj/pkg/pb"
pdbclient "storj.io/storj/pkg/pointerdb/pdbclient"
)
@ -50,12 +52,12 @@ func (mr *MockClientMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {
}
// Get mocks base method
func (m *MockClient) Get(arg0 context.Context, arg1 string) (*pb.Pointer, []*pb.Node, *pb.PayerBandwidthAllocation, error) {
func (m *MockClient) Get(arg0 context.Context, arg1 string) (*pb.Pointer, []*pb.Node, *pb.OrderLimit, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0, arg1)
ret0, _ := ret[0].(*pb.Pointer)
ret1, _ := ret[1].([]*pb.Node)
ret2, _ := ret[2].(*pb.PayerBandwidthAllocation)
ret2, _ := ret[2].(*pb.OrderLimit)
ret3, _ := ret[3].(error)
return ret0, ret1, ret2, ret3
}
@ -83,15 +85,15 @@ func (mr *MockClientMockRecorder) List(arg0, arg1, arg2, arg3, arg4, arg5, arg6
}
// PayerBandwidthAllocation mocks base method
func (m *MockClient) PayerBandwidthAllocation(arg0 context.Context, arg1 pb.BandwidthAction) (*pb.PayerBandwidthAllocation, error) {
func (m *MockClient) PayerBandwidthAllocation(arg0 context.Context, arg1 pb.BandwidthAction) (*pb.OrderLimit, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PayerBandwidthAllocation", arg0, arg1)
ret0, _ := ret[0].(*pb.PayerBandwidthAllocation)
ret0, _ := ret[0].(*pb.OrderLimit)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PayerBandwidthAllocation indicates an expected call of PayerBandwidthAllocation
// OrderLimit indicates an expected call of OrderLimit
func (mr *MockClientMockRecorder) PayerBandwidthAllocation(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PayerBandwidthAllocation", reflect.TypeOf((*MockClient)(nil).PayerBandwidthAllocation), arg0, arg1)

View File

@ -106,7 +106,7 @@ func (m *MockPointerDBClient) PayerBandwidthAllocation(arg0 context.Context, arg
return ret0, ret1
}
// PayerBandwidthAllocation indicates an expected call of PayerBandwidthAllocation
// OrderLimit indicates an expected call of OrderLimit
func (mr *MockPointerDBClientMockRecorder) PayerBandwidthAllocation(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PayerBandwidthAllocation", reflect.TypeOf((*MockPointerDBClient)(nil).PayerBandwidthAllocation), varargs...)

View File

@ -233,7 +233,7 @@ func (s *Server) Iterate(ctx context.Context, req *pb.IterateRequest, f func(it
return s.service.Iterate(prefix, req.First, req.Recurse, req.Reverse, f)
}
// PayerBandwidthAllocation returns PayerBandwidthAllocation struct, signed and with given action type
// PayerBandwidthAllocation returns OrderLimit struct, signed and with given action type
func (s *Server) PayerBandwidthAllocation(ctx context.Context, req *pb.PayerBandwidthAllocationRequest) (res *pb.PayerBandwidthAllocationResponse, err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -28,8 +28,8 @@ var mon = monkit.Package()
// Client defines an interface for storing erasure coded data to piece store nodes
type Client interface {
Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.PayerBandwidthAllocation) (successfulNodes []*pb.Node, err error)
Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme, pieceID psclient.PieceID, size int64, pba *pb.PayerBandwidthAllocation) (ranger.Ranger, error)
Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, err error)
Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme, pieceID psclient.PieceID, size int64, pba *pb.OrderLimit) (ranger.Ranger, error)
Delete(ctx context.Context, nodes []*pb.Node, pieceID psclient.PieceID, satelliteID storj.NodeID) error
}
@ -56,7 +56,7 @@ func (ec *ecClient) newPSClient(ctx context.Context, n *pb.Node) (psclient.Clien
return ec.newPSClientFunc(ctx, ec.transport, n, 0)
}
func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.PayerBandwidthAllocation) (successfulNodes []*pb.Node, err error) {
func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.RedundancyStrategy, pieceID psclient.PieceID, data io.Reader, expiration time.Time, pba *pb.OrderLimit) (successfulNodes []*pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodes) != rs.TotalCount() {
return nil, Error.New("size of nodes slice (%d) does not match total count (%d) of erasure scheme", len(nodes), rs.TotalCount())
@ -146,7 +146,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
return successfulNodes, nil
}
func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID psclient.PieceID, data io.ReadCloser, expiration time.Time, pba *pb.PayerBandwidthAllocation) (err error) {
func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID psclient.PieceID, data io.ReadCloser, expiration time.Time, pba *pb.OrderLimit) (err error) {
defer func() { err = errs.Combine(err, data.Close()) }()
if node == nil {
@ -189,7 +189,7 @@ func (ec *ecClient) putPiece(ctx, parent context.Context, node *pb.Node, pieceID
}
func (ec *ecClient) Get(ctx context.Context, nodes []*pb.Node, es eestream.ErasureScheme,
pieceID psclient.PieceID, size int64, pba *pb.PayerBandwidthAllocation) (rr ranger.Ranger, err error) {
pieceID psclient.PieceID, size int64, pba *pb.OrderLimit) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodes) != es.TotalCount() {
@ -361,7 +361,7 @@ type lazyPieceRanger struct {
node *pb.Node
id psclient.PieceID
size int64
pba *pb.PayerBandwidthAllocation
pba *pb.OrderLimit
}
// Size implements Ranger.Size

View File

@ -156,8 +156,8 @@ TestLoop:
}
ps := NewMockPSClient(ctrl)
gomock.InOrder(
ps.EXPECT().Put(gomock.Any(), derivedID, gomock.Any(), ttl, &pb.PayerBandwidthAllocation{}).Return(errs[n]).
Do(func(ctx context.Context, id psclient.PieceID, data io.Reader, ttl time.Time, ba *pb.PayerBandwidthAllocation) {
ps.EXPECT().Put(gomock.Any(), derivedID, gomock.Any(), ttl, &pb.OrderLimit{}).Return(errs[n]).
Do(func(ctx context.Context, id psclient.PieceID, data io.Reader, ttl time.Time, ba *pb.OrderLimit) {
// simulate that the mocked piece store client is reading the data
_, err := io.Copy(ioutil.Discard, data)
assert.NoError(t, err, errTag)
@ -173,7 +173,7 @@ TestLoop:
r := io.LimitReader(rand.Reader, int64(size))
ec := ecClient{newPSClientFunc: mockNewPSClient(clients)}
successfulNodes, err := ec.Put(ctx, tt.nodes, rs, id, r, ttl, &pb.PayerBandwidthAllocation{})
successfulNodes, err := ec.Put(ctx, tt.nodes, rs, id, r, ttl, &pb.OrderLimit{})
if tt.errString != "" {
assert.EqualError(t, err, tt.errString, errTag)

View File

@ -55,7 +55,7 @@ func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *go
}
// Get mocks base method
func (m *MockClient) Get(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.ErasureScheme, arg3 psclient.PieceID, arg4 int64, arg5 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
func (m *MockClient) Get(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.ErasureScheme, arg3 psclient.PieceID, arg4 int64, arg5 *pb.OrderLimit) (ranger.Ranger, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(ranger.Ranger)
@ -70,7 +70,7 @@ func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4, arg5 interfa
}
// Put mocks base method
func (m *MockClient) Put(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.RedundancyStrategy, arg3 psclient.PieceID, arg4 io.Reader, arg5 time.Time, arg6 *pb.PayerBandwidthAllocation) ([]*pb.Node, error) {
func (m *MockClient) Put(arg0 context.Context, arg1 []*pb.Node, arg2 eestream.RedundancyStrategy, arg3 psclient.PieceID, arg4 io.Reader, arg5 time.Time, arg6 *pb.OrderLimit) ([]*pb.Node, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
ret0, _ := ret[0].([]*pb.Node)

View File

@ -68,7 +68,7 @@ func (mr *MockPSClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock
}
// Get mocks base method
func (m *MockPSClient) Get(arg0 context.Context, arg1 psclient.PieceID, arg2 int64, arg3 *pb.PayerBandwidthAllocation) (ranger.Ranger, error) {
func (m *MockPSClient) Get(arg0 context.Context, arg1 psclient.PieceID, arg2 int64, arg3 *pb.OrderLimit) (ranger.Ranger, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(ranger.Ranger)
@ -98,7 +98,7 @@ func (mr *MockPSClientMockRecorder) Meta(arg0, arg1 interface{}) *gomock.Call {
}
// Put mocks base method
func (m *MockPSClient) Put(arg0 context.Context, arg1 psclient.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *pb.PayerBandwidthAllocation) error {
func (m *MockPSClient) Put(arg0 context.Context, arg1 psclient.PieceID, arg2 io.Reader, arg3 time.Time, arg4 *pb.OrderLimit) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)

View File

@ -391,7 +391,7 @@ func TestSegmentStoreDeleteRemote(t *testing.T) {
ExpirationDate: someTime,
SegmentSize: tt.size,
Metadata: tt.metadata,
}, nil, &pb.PayerBandwidthAllocation{}, nil),
}, nil, &pb.OrderLimit{}, nil),
mockOC.EXPECT().BulkLookup(gomock.Any(), gomock.Any()),
mockEC.EXPECT().Delete(
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),

View File

@ -20,7 +20,7 @@ type bandwidthagreement struct {
db *dbx.DB
}
func (b *bandwidthagreement) CreateAgreement(ctx context.Context, rba *pb.RenterBandwidthAllocation) (err error) {
func (b *bandwidthagreement) CreateAgreement(ctx context.Context, rba *pb.Order) (err error) {
expiration := time.Unix(rba.PayerAllocation.ExpirationUnixSec, 0)
_, err = b.db.Create_Bwagreement(
ctx,

View File

@ -56,7 +56,7 @@ func (b *certDB) GetPublicKey(ctx context.Context, nodeID storj.NodeID) (crypto.
}
pubkey, err := pkcrypto.PublicKeyFromPKIX(dbxInfo.Publickey)
if err != nil {
return nil, Error.New("Failed to extract Public Key from RenterBandwidthAllocation: %+v", err)
return nil, Error.New("Failed to extract Public Key from Order: %+v", err)
}
return pubkey, nil
}

View File

@ -113,7 +113,7 @@ type lockedBandwidthAgreement struct {
}
// CreateAgreement adds a new bandwidth agreement.
func (m *lockedBandwidthAgreement) CreateAgreement(ctx context.Context, a1 *pb.RenterBandwidthAllocation) error {
func (m *lockedBandwidthAgreement) CreateAgreement(ctx context.Context, a1 *pb.Order) error {
m.Lock()
defer m.Unlock()
return m.db.CreateAgreement(ctx, a1)

View File

@ -210,7 +210,7 @@ func (db *DB) PostgresMigration() *migrate.Migration {
return ErrMigrate.Wrap(err)
}
var rba pb.RenterBandwidthAllocation
var rba pb.Order
if err := proto.Unmarshal(data, &rba); err != nil {
return ErrMigrate.Wrap(err)
}