satellite/orders: Signer for simplifying signing logic

Create a separate struct for signing order limits.

Change-Id: I8f8f5245040efa8c03138512be9248d4834f3f36
commit cd5e99ea6b
parent 01bb2bd17d

satellite/orders/service.go
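The change collapses the repeated inline pb.OrderLimit construction in each Create*OrderLimits method into a single Signer struct with per-action constructors. A condensed sketch of the resulting call pattern, written as a hypothetical helper in package orders (the wiring and error handling mirror CreatePutOrderLimits in the diff below):

func signPutLimits(ctx context.Context, service *Service, nodes []storj.NodeURL, pieceExpiration time.Time, maxPieceSize int64) (*Signer, error) {
	orderCreation := time.Now()
	orderExpiration := orderCreation.Add(service.orderExpiration)

	// One constructor per action; NewSignerPut also generates the root piece ID,
	// the piece key pair, and the timestamped serial shared by all limits.
	signer, err := NewSignerPut(service, pieceExpiration, orderCreation, orderExpiration, maxPieceSize)
	if err != nil {
		return nil, err
	}
	for pieceNum, node := range nodes {
		// Sign derives the per-node piece ID, fills the shared fields, signs with
		// the satellite identity, and appends to signer.AddressedLimits.
		if _, err := signer.Sign(ctx, node, int32(pieceNum)); err != nil {
			return nil, err
		}
	}
	return signer, nil
}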
@@ -6,8 +6,6 @@ package orders
 import (
 	"bytes"
 	"context"
-	"crypto/rand"
-	"encoding/binary"
 	"math"
 	mathrand "math/rand"
 	"sync"
@@ -84,20 +82,6 @@ func (service *Service) VerifyOrderLimitSignature(ctx context.Context, signed *p
 	return signing.VerifyOrderLimitSignature(ctx, service.satellite, signed)
 }
 
-func (service *Service) createSerial(ctx context.Context, orderExpiration time.Time) (_ storj.SerialNumber, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	var serial storj.SerialNumber
-
-	binary.BigEndian.PutUint64(serial[0:8], uint64(orderExpiration.Unix()))
-	_, err = rand.Read(serial[8:])
-	if err != nil {
-		return storj.SerialNumber{}, Error.Wrap(err)
-	}
-
-	return serial, nil
-}
-
 func (service *Service) saveSerial(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, expiresAt time.Time) (err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.orders.CreateSerialInfo(ctx, serialNumber, bucketID, expiresAt)
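The createSerial helper removed here reappears in signer.go below as a package-level function. The serial embeds the order expiration in its first 8 bytes, so the expiration can be read back from the serial itself. A self-contained illustration of that layout (hypothetical example, not satellite code; the decode step is an assumption about why the timestamp is embedded):

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	// Same layout as createSerial: bytes 0..8 hold the big-endian Unix
	// expiration time, bytes 8..16 are random.
	var serial [16]byte
	expiration := time.Now().Add(48 * time.Hour)
	binary.BigEndian.PutUint64(serial[0:8], uint64(expiration.Unix()))
	if _, err := rand.Read(serial[8:]); err != nil {
		panic(err)
	}

	// The prefix decodes back to the expiration at second granularity.
	recovered := time.Unix(int64(binary.BigEndian.Uint64(serial[0:8])), 0)
	fmt.Println(recovered.Unix() == expiration.Unix()) // true
}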
@@ -145,7 +129,7 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucketID []byt
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
+	serialNumber, err := createSerial(orderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
@@ -267,55 +251,29 @@ func (service *Service) RandomSampleOfOrderLimits(limits []*pb.AddressedOrderLim
 }
 
 // CreatePutOrderLimits creates the order limits for uploading pieces to nodes.
-func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*overlay.SelectedNode, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*overlay.SelectedNode, pieceExpiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	orderExpiration := time.Now().Add(service.orderExpiration)
+	orderCreation := time.Now()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
 
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
+	signer, err := NewSignerPut(service, pieceExpiration, orderCreation, orderExpiration, maxPieceSize)
 	if err != nil {
 		return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	rootPieceID := storj.NewPieceID()
-	limits := make([]*pb.AddressedOrderLimit, len(nodes))
-	var pieceNum int32
-	for _, node := range nodes {
-		orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-			SerialNumber:     serialNumber,
-			SatelliteId:      service.satellite.ID(),
-			SatelliteAddress: service.satelliteAddress,
-			UplinkPublicKey:  piecePublicKey,
-			StorageNodeId:    node.ID,
-			PieceId:          rootPieceID.Derive(node.ID, pieceNum),
-			Action:           pb.PieceAction_PUT,
-			Limit:            maxPieceSize,
-			PieceExpiration:  expiration,
-			OrderCreation:    time.Now(),
-			OrderExpiration:  orderExpiration,
-		})
+	for pieceNum, node := range nodes {
+		address := node.Address.Address
+		if node.LastIPPort != "" {
+			address = node.LastIPPort
+		}
+		_, err := signer.Sign(ctx, storj.NodeURL{ID: node.ID, Address: address}, int32(pieceNum))
 		if err != nil {
 			return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}
-
-		// use the lastIP that we have on record to avoid doing extra DNS resolutions
-		if node.LastIPPort != "" {
-			node.Address.Address = node.LastIPPort
-		}
-		limits[pieceNum] = &pb.AddressedOrderLimit{
-			Limit:              orderLimit,
-			StorageNodeAddress: node.Address,
-		}
-		pieceNum++
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, signer.OrderExpiration)
 	if err != nil {
 		return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
@@ -324,31 +282,17 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byt
 	if err != nil {
 		return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
-	if err := service.updateBandwidth(ctx, projectID, bucketName, limits...); err != nil {
+	if err := service.updateBandwidth(ctx, projectID, bucketName, signer.AddressedLimits...); err != nil {
 		return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return rootPieceID, limits, piecePrivateKey, nil
+	return signer.RootPieceID, signer.AddressedLimits, signer.PrivateKey, nil
 }
 
 // CreateDeleteOrderLimits creates the order limits for deleting the pieces of pointer.
 func (service *Service) CreateDeleteOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	rootPieceID := pointer.GetRemote().RootPieceId
-	pieceExpiration := pointer.ExpirationDate
-	orderExpiration := time.Now().Add(service.orderExpiration)
-
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
 	nodeIDs := make([]storj.NodeID, len(pointer.GetRemote().GetRemotePieces()))
 	for i, piece := range pointer.GetRemote().GetRemotePieces() {
 		nodeIDs[i] = piece.NodeId
@@ -360,8 +304,15 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, bucketID []
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
+	orderCreation := time.Now()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
+
+	signer, err := NewSignerDelete(service, pointer.GetRemote().RootPieceId, pointer.ExpirationDate, orderCreation, orderExpiration)
+	if err != nil {
+		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
+	}
+
 	var nodeErrors errs.Group
-	var limits []*pb.AddressedOrderLimit
 	for _, piece := range pointer.GetRemote().GetRemotePieces() {
 		node, ok := nodes[piece.NodeId]
 		if !ok {
@@ -369,66 +320,38 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, bucketID []
 			continue
 		}
 
-		orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-			SerialNumber:     serialNumber,
-			SatelliteId:      service.satellite.ID(),
-			SatelliteAddress: service.satelliteAddress,
-			UplinkPublicKey:  piecePublicKey,
-			StorageNodeId:    piece.NodeId,
-			PieceId:          rootPieceID.Derive(piece.NodeId, piece.PieceNum),
-			Action:           pb.PieceAction_DELETE,
-			Limit:            0,
-			PieceExpiration:  pieceExpiration,
-			OrderCreation:    time.Now(),
-			OrderExpiration:  orderExpiration,
-		})
+		address := node.Address.Address
+		if node.LastIPPort != "" {
+			address = node.LastIPPort
+		}
+		_, err := signer.Sign(ctx, storj.NodeURL{ID: piece.NodeId, Address: address}, piece.PieceNum)
 		if err != nil {
 			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}
-
-		// use the lastIP that we have on record to avoid doing extra DNS resolutions
-		if node.LastIPPort != "" {
-			node.Address.Address = node.LastIPPort
-		}
-		limits = append(limits, &pb.AddressedOrderLimit{
-			Limit:              orderLimit,
-			StorageNodeAddress: node.Address,
-		})
 	}
 
-	if len(limits) == 0 {
-		err = Error.New("failed creating order limits for all nodes")
-		return nil, storj.PiecePrivateKey{}, errs.Combine(err, nodeErrors.Err())
+	if len(signer.AddressedLimits) == 0 {
+		return nil, storj.PiecePrivateKey{}, Error.New("failed creating order limits: %w", nodeErrors.Err())
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, signer.OrderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return limits, piecePrivateKey, nil
+	return signer.AddressedLimits, signer.PrivateKey, nil
 }
 
 // CreateAuditOrderLimits creates the order limits for auditing the pieces of pointer.
 func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, skip map[storj.NodeID]bool) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
-	rootPieceID := pointer.GetRemote().RootPieceId
 	redundancy := pointer.GetRemote().GetRedundancy()
 	shareSize := redundancy.GetErasureShareSize()
 	totalPieces := redundancy.GetTotal()
 
-	pieceExpiration := pointer.ExpirationDate
-	orderExpiration := time.Now().Add(service.orderExpiration)
+	orderCreation := time.Now()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
 
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
 	nodeIDs := make([]storj.NodeID, len(pointer.GetRemote().GetRemotePieces()))
 	for i, piece := range pointer.GetRemote().GetRemotePieces() {
@@ -441,6 +364,11 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []b
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
+	signer, err := NewSignerAudit(service, pointer.GetRemote().RootPieceId, orderCreation, orderExpiration, int64(shareSize))
+	if err != nil {
+		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
+	}
+
 	var nodeErrors errs.Group
 	var limitsCount int32
 	limits := make([]*pb.AddressedOrderLimit, totalPieces)
@@ -448,34 +376,21 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []b
 		if skip[piece.NodeId] {
 			continue
 		}
 
 		node, ok := nodes[piece.NodeId]
 		if !ok {
 			nodeErrors.Add(errs.New("node %q is not reliable", piece.NodeId))
 			continue
 		}
 
-		orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-			SerialNumber:     serialNumber,
-			SatelliteId:      service.satellite.ID(),
-			SatelliteAddress: service.satelliteAddress,
-			UplinkPublicKey:  piecePublicKey,
-			StorageNodeId:    piece.NodeId,
-			PieceId:          rootPieceID.Derive(piece.NodeId, piece.PieceNum),
-			Action:           pb.PieceAction_GET_AUDIT,
-			Limit:            int64(shareSize),
-			PieceExpiration:  pieceExpiration,
-			OrderCreation:    time.Now(),
-			OrderExpiration:  orderExpiration,
-		})
+		limit, err := signer.Sign(ctx, storj.NodeURL{
+			ID:      piece.NodeId,
+			Address: node.Address.Address,
+		}, piece.PieceNum)
 		if err != nil {
 			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}
 
-		limits[piece.GetPieceNum()] = &pb.AddressedOrderLimit{
-			Limit:              orderLimit,
-			StorageNodeAddress: node.Address,
-		}
+		limits[piece.GetPieceNum()] = limit
 		limitsCount++
 	}
 
@@ -484,7 +399,7 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []b
 		return nil, storj.PiecePrivateKey{}, errs.Combine(err, nodeErrors.Err())
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, signer.OrderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
@@ -497,7 +412,7 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []b
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return limits, piecePrivateKey, nil
+	return limits, signer.PrivateKey, nil
 }
 
 // CreateAuditOrderLimit creates an order limit for auditing a single the piece from a pointer.
@@ -505,70 +420,50 @@ func (service *Service) CreateAuditOrderLimit(ctx context.Context, bucketID []by
 	// TODO reduce number of params ?
 	defer mon.Task()(&ctx)(&err)
 
-	orderExpiration := time.Now().Add(service.orderExpiration)
+	orderCreation := time.Now()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
 
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
 	node, err := service.overlay.Get(ctx, nodeID)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
 	if node.Disqualified != nil {
 		return nil, storj.PiecePrivateKey{}, overlay.ErrNodeDisqualified.New("%v", nodeID)
 	}
 
 	if node.ExitStatus.ExitFinishedAt != nil {
 		return nil, storj.PiecePrivateKey{}, overlay.ErrNodeFinishedGE.New("%v", nodeID)
 	}
 
 	if !service.overlay.IsOnline(node) {
 		return nil, storj.PiecePrivateKey{}, overlay.ErrNodeOffline.New("%v", nodeID)
 	}
 
-	orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-		SerialNumber:     serialNumber,
-		SatelliteId:      service.satellite.ID(),
-		SatelliteAddress: service.satelliteAddress,
-		UplinkPublicKey:  piecePublicKey,
-		StorageNodeId:    nodeID,
-		PieceId:          rootPieceID.Derive(nodeID, pieceNum),
-		Action:           pb.PieceAction_GET_AUDIT,
-		Limit:            int64(shareSize),
-		OrderCreation:    time.Now(),
-		OrderExpiration:  orderExpiration,
-	})
+	signer, err := NewSignerAudit(service, rootPieceID, orderCreation, orderExpiration, int64(shareSize))
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	limit = &pb.AddressedOrderLimit{
-		Limit:              orderLimit,
-		StorageNodeAddress: node.Address,
+	orderLimit, err := signer.Sign(ctx, storj.NodeURL{
+		ID:      nodeID,
+		Address: node.Address.Address,
+	}, pieceNum)
+	if err != nil {
+		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, signer.OrderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
 	projectID, bucketName, err := SplitBucketID(bucketID)
 	if err != nil {
-		return limit, storj.PiecePrivateKey{}, Error.Wrap(err)
+		return orderLimit, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 	if err := service.updateBandwidth(ctx, projectID, bucketName, limit); err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return limit, piecePrivateKey, nil
+	return orderLimit, signer.PrivateKey, nil
 }
 
 // CreateGetRepairOrderLimits creates the order limits for downloading the
@@ -580,7 +475,6 @@ func (service *Service) CreateAuditOrderLimit(ctx context.Context, bucketID []by
 func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, healthy []*pb.RemotePiece) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	rootPieceID := pointer.GetRemote().RootPieceId
 	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
@@ -588,18 +482,8 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 
 	pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
 	totalPieces := redundancy.TotalCount()
-	pieceExpiration := pointer.ExpirationDate
-	orderExpiration := time.Now().Add(service.orderExpiration)
+	orderCreation := time.Now()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
 
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
 	nodeIDs := make([]storj.NodeID, len(pointer.GetRemote().GetRemotePieces()))
 	for i, piece := range pointer.GetRemote().GetRemotePieces() {
@@ -612,6 +496,11 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
+	signer, err := NewSignerRepairGet(service, pointer.GetRemote().RootPieceId, pointer.ExpirationDate, orderCreation, orderExpiration, pieceSize)
+	if err != nil {
+		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
+	}
+
 	var nodeErrors errs.Group
 	var limitsCount int
 	limits := make([]*pb.AddressedOrderLimit, totalPieces)
@@ -622,27 +511,15 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 			continue
 		}
 
-		orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-			SerialNumber:     serialNumber,
-			SatelliteId:      service.satellite.ID(),
-			SatelliteAddress: service.satelliteAddress,
-			UplinkPublicKey:  piecePublicKey,
-			StorageNodeId:    piece.NodeId,
-			PieceId:          rootPieceID.Derive(piece.NodeId, piece.PieceNum),
-			Action:           pb.PieceAction_GET_REPAIR,
-			Limit:            pieceSize,
-			PieceExpiration:  pieceExpiration,
-			OrderCreation:    time.Now(),
-			OrderExpiration:  orderExpiration,
-		})
+		limit, err := signer.Sign(ctx, storj.NodeURL{
+			ID:      piece.NodeId,
+			Address: node.Address.Address,
+		}, piece.PieceNum)
 		if err != nil {
 			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}
 
-		limits[piece.GetPieceNum()] = &pb.AddressedOrderLimit{
-			Limit:              orderLimit,
-			StorageNodeAddress: node.Address,
-		}
+		limits[piece.GetPieceNum()] = limit
 		limitsCount++
 	}
 
@@ -651,7 +528,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 		return nil, storj.PiecePrivateKey{}, errs.Combine(err, nodeErrors.Err())
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, signer.OrderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
@@ -664,92 +541,72 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return limits, piecePrivateKey, nil
+	return limits, signer.PrivateKey, nil
 }
 
 // CreatePutRepairOrderLimits creates the order limits for uploading the repaired pieces of pointer to newNodes.
 func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*overlay.SelectedNode, optimalThresholdMultiplier float64) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
-	orderExpiration := time.Now().Add(service.orderExpiration)
 
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
+	orderCreation := time.Now()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
+
+	// Create the order limits for being used to upload the repaired pieces
+	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
+	if err != nil {
+		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
+	}
+	pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
+
+	totalPieces := redundancy.TotalCount()
+	totalPiecesAfterRepair := int(math.Ceil(float64(redundancy.OptimalThreshold()) * optimalThresholdMultiplier))
+	if totalPiecesAfterRepair > totalPieces {
+		totalPiecesAfterRepair = totalPieces
+	}
+
+	var numCurrentPieces int
+	for _, o := range getOrderLimits {
+		if o != nil {
+			numCurrentPieces++
+		}
+	}
+
+	totalPiecesToRepair := totalPiecesAfterRepair - numCurrentPieces
+
+	limits := make([]*pb.AddressedOrderLimit, totalPieces)
+	signer, err := NewSignerRepairPut(service, pointer.GetRemote().RootPieceId, pointer.ExpirationDate, orderCreation, orderExpiration, pieceSize)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	var limits []*pb.AddressedOrderLimit
-	{ // Create the order limits for being used to upload the repaired pieces
-		redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
-		if err != nil {
-			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-		}
-
-		totalPieces := redundancy.TotalCount()
-		limits = make([]*pb.AddressedOrderLimit, totalPieces)
-
-		totalPiecesAfterRepair := int(math.Ceil(float64(redundancy.OptimalThreshold()) * optimalThresholdMultiplier))
-		if totalPiecesAfterRepair > totalPieces {
-			totalPiecesAfterRepair = totalPieces
-		}
-
-		var numCurrentPieces int
-		for _, o := range getOrderLimits {
-			if o != nil {
-				numCurrentPieces++
-			}
-		}
-
-		var (
-			totalPiecesToRepair = totalPiecesAfterRepair - numCurrentPieces
-			rootPieceID         = pointer.GetRemote().RootPieceId
-			pieceSize           = eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
-			pieceNum            int32
-		)
-		for _, node := range newNodes {
-			for int(pieceNum) < totalPieces && getOrderLimits[pieceNum] != nil {
-				pieceNum++
-			}
-
-			if int(pieceNum) >= totalPieces { // should not happen
-				return nil, storj.PiecePrivateKey{}, Error.New("piece num greater than total pieces: %d >= %d", pieceNum, totalPieces)
-			}
-
-			orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-				SerialNumber:     serialNumber,
-				SatelliteId:      service.satellite.ID(),
-				SatelliteAddress: service.satelliteAddress,
-				UplinkPublicKey:  piecePublicKey,
-				StorageNodeId:    node.ID,
-				PieceId:          rootPieceID.Derive(node.ID, pieceNum),
-				Action:           pb.PieceAction_PUT_REPAIR,
-				Limit:            pieceSize,
-				PieceExpiration:  pointer.ExpirationDate,
-				OrderCreation:    time.Now(),
-				OrderExpiration:  orderExpiration,
-			})
-			if err != nil {
-				return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-			}
-
-			limits[pieceNum] = &pb.AddressedOrderLimit{
-				Limit:              orderLimit,
-				StorageNodeAddress: node.Address,
-			}
-			pieceNum++
-			totalPiecesToRepair--
-
-			if totalPiecesToRepair == 0 {
-				break
-			}
-		}
+	var pieceNum int32
+	for _, node := range newNodes {
+		for int(pieceNum) < totalPieces && getOrderLimits[pieceNum] != nil {
+			pieceNum++
+		}
+
+		if int(pieceNum) >= totalPieces { // should not happen
+			return nil, storj.PiecePrivateKey{}, Error.New("piece num greater than total pieces: %d >= %d", pieceNum, totalPieces)
+		}
+
+		limit, err := signer.Sign(ctx, storj.NodeURL{
+			ID:      node.ID,
+			Address: node.Address.Address,
+		}, pieceNum)
+		if err != nil {
+			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
+		}
+
+		limits[pieceNum] = limit
+		pieceNum++
+		totalPiecesToRepair--
+
+		if totalPiecesToRepair == 0 {
+			break
+		}
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, orderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
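The inner loop above advances pieceNum past every slot that already has a healthy download limit, so each new node is assigned a piece number that actually needs repair. Extracted as a hypothetical standalone helper for clarity (bounded here by the slice length rather than totalPieces, an assumption for self-containment; within package orders for the pb import):

// nextFreeSlot returns the first piece number at or after pieceNum whose
// entry in getOrderLimits is nil, i.e. a slot with no healthy piece.
func nextFreeSlot(getOrderLimits []*pb.AddressedOrderLimit, pieceNum int32) int32 {
	for int(pieceNum) < len(getOrderLimits) && getOrderLimits[pieceNum] != nil {
		pieceNum++
	}
	return pieceNum
}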
@@ -762,69 +619,40 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return limits, piecePrivateKey, nil
+	return limits, signer.PrivateKey, nil
 }
 
 // CreateGracefulExitPutOrderLimit creates an order limit for graceful exit put transfers.
 func (service *Service) CreateGracefulExitPutOrderLimit(ctx context.Context, bucketID []byte, nodeID storj.NodeID, pieceNum int32, rootPieceID storj.PieceID, shareSize int32) (limit *pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	orderExpiration := time.Now().UTC().Add(service.orderExpiration)
+	orderCreation := time.Now().UTC()
+	orderExpiration := orderCreation.Add(service.orderExpiration)
 
-	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
-	serialNumber, err := service.createSerial(ctx, orderExpiration)
-	if err != nil {
-		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
-	}
-
+	// should this use KnownReliable or similar?
 	node, err := service.overlay.Get(ctx, nodeID)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
 	if node.Disqualified != nil {
 		return nil, storj.PiecePrivateKey{}, overlay.ErrNodeDisqualified.New("%v", nodeID)
 	}
 
 	if !service.overlay.IsOnline(node) {
 		return nil, storj.PiecePrivateKey{}, overlay.ErrNodeOffline.New("%v", nodeID)
 	}
 
-	// TODO: we're using `PUT_REPAIR` here even though `PUT_GRACEFUL_EXIT` exists and
-	// seems like the perfect thing because we're in a pickle. we can't use `PUT`
-	// because we don't want to charge bucket owners for graceful exit bandwidth, and
-	// we can't use `PUT_GRACEFUL_EXIT` because storagenode will only accept upload
-	// orders with `PUT` or `PUT_REPAIR` as the action. we also don't have a bunch of
-	// supporting code/tables to aggregate `PUT_GRACEFUL_EXIT` bandwidth into our rollups
-	// and stuff. so, for now, we just use `PUT_REPAIR` because it's the least bad of
-	// our options. this should be fixed.
-
-	orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
-		SerialNumber:     serialNumber,
-		SatelliteId:      service.satellite.ID(),
-		SatelliteAddress: service.satelliteAddress,
-		UplinkPublicKey:  piecePublicKey,
-		StorageNodeId:    nodeID,
-		PieceId:          rootPieceID.Derive(nodeID, pieceNum),
-		Action:           pb.PieceAction_PUT_REPAIR,
-		Limit:            int64(shareSize),
-		OrderCreation:    time.Now().UTC(),
-		OrderExpiration:  orderExpiration,
-	})
+	signer, err := NewSignerGracefulExit(service, rootPieceID, orderCreation, orderExpiration, shareSize)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	limit = &pb.AddressedOrderLimit{
-		Limit:              orderLimit,
-		StorageNodeAddress: node.Address,
+	nodeURL := storj.NodeURL{ID: nodeID, Address: node.Address.Address}
+	limit, err = signer.Sign(ctx, nodeURL, pieceNum)
+	if err != nil {
+		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
+	err = service.saveSerial(ctx, signer.Serial, bucketID, signer.OrderExpiration)
 	if err != nil {
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
@@ -837,7 +665,7 @@ func (service *Service) CreateGracefulExitPutOrderLimit(ctx context.Context, buc
 		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 	}
 
-	return limit, piecePrivateKey, nil
+	return limit, signer.PrivateKey, nil
 }
 
 // UpdateGetInlineOrder updates amount of inline GET bandwidth for given bucket.
satellite/orders/signer.go (new file, 170 lines)
@@ -0,0 +1,170 @@
+// Copyright (C) 2020 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package orders
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/binary"
+	"time"
+
+	"github.com/zeebo/errs"
+
+	"storj.io/common/pb"
+	"storj.io/common/signing"
+	"storj.io/common/storj"
+)
+
+// ErrSigner is default error class for Signer.
+var ErrSigner = errs.Class("signer")
+
+// Signer implements signing of order limits.
+type Signer struct {
+	// TODO: should this be a ref to the necessary pieces instead of the service?
+	Service *Service
+
+	// TODO: use a Template pb.OrderLimit here?
+
+	RootPieceID storj.PieceID
+
+	PieceExpiration time.Time
+	OrderCreation   time.Time
+	OrderExpiration time.Time
+
+	PublicKey  storj.PiecePublicKey
+	PrivateKey storj.PiecePrivateKey
+
+	Serial storj.SerialNumber
+	Action pb.PieceAction
+	Limit  int64
+
+	AddressedLimits []*pb.AddressedOrderLimit
+}
+
+// createSerial creates a timestamped serial number.
+func createSerial(orderExpiration time.Time) (_ storj.SerialNumber, err error) {
+	var serial storj.SerialNumber
+
+	binary.BigEndian.PutUint64(serial[0:8], uint64(orderExpiration.Unix()))
+	_, err = rand.Read(serial[8:])
+	if err != nil {
+		return storj.SerialNumber{}, ErrSigner.Wrap(err)
+	}
+
+	return serial, nil
+}
+
+// TODO: do we need to pass both orderCreation and orderExpiration as arguments?
+
+// NewSigner creates an order limit signer.
+func NewSigner(service *Service, rootPieceID storj.PieceID, pieceExpiration time.Time, orderCreation time.Time, orderExpiration time.Time, limit int64, action pb.PieceAction) (*Signer, error) {
+	signer := &Signer{}
+	signer.Service = service
+
+	signer.RootPieceID = rootPieceID
+
+	signer.PieceExpiration = pieceExpiration
+	signer.OrderCreation = orderCreation
+	signer.OrderExpiration = orderExpiration
+
+	var err error
+	signer.PublicKey, signer.PrivateKey, err = storj.NewPieceKey()
+	if err != nil {
+		return nil, ErrSigner.Wrap(err)
+	}
+
+	signer.Serial, err = createSerial(orderExpiration)
+	if err != nil {
+		return nil, ErrSigner.Wrap(err)
+	}
+
+	signer.Action = action
+	signer.Limit = limit
+
+	return signer, nil
+}
+
+// NewSignerGet creates a new signer for get orders.
+func NewSignerGet(service *Service, rootPieceID storj.PieceID, pieceExpiration time.Time, orderCreation time.Time, orderExpiration time.Time, limit int64) (*Signer, error) {
+	return NewSigner(service, rootPieceID, pieceExpiration, orderCreation, orderExpiration, limit, pb.PieceAction_GET)
+}
+
+// NewSignerPut creates a new signer for put orders.
+func NewSignerPut(service *Service, pieceExpiration time.Time, orderCreation time.Time, orderExpiration time.Time, limit int64) (*Signer, error) {
+	rootPieceID := storj.NewPieceID()
+	return NewSigner(service, rootPieceID, pieceExpiration, orderCreation, orderExpiration, limit, pb.PieceAction_PUT)
+}
+
+// NewSignerDelete creates a new signer for delete orders.
+func NewSignerDelete(service *Service, rootPieceID storj.PieceID, pieceExpiration time.Time, orderCreation time.Time, orderExpiration time.Time) (*Signer, error) {
+	return NewSigner(service, rootPieceID, pieceExpiration, orderCreation, orderExpiration, 0, pb.PieceAction_DELETE)
+}
+
+// NewSignerRepairGet creates a new signer for get repair orders.
+func NewSignerRepairGet(service *Service, rootPieceID storj.PieceID, pieceExpiration time.Time, orderCreation time.Time, orderExpiration time.Time, pieceSize int64) (*Signer, error) {
+	return NewSigner(service, rootPieceID, pieceExpiration, orderCreation, orderExpiration, pieceSize, pb.PieceAction_GET_REPAIR)
+}
+
+// NewSignerRepairPut creates a new signer for put repair orders.
+func NewSignerRepairPut(service *Service, rootPieceID storj.PieceID, pieceExpiration time.Time, orderCreation time.Time, orderExpiration time.Time, pieceSize int64) (*Signer, error) {
+	return NewSigner(service, rootPieceID, pieceExpiration, orderCreation, orderExpiration, pieceSize, pb.PieceAction_PUT_REPAIR)
+}
+
+// NewSignerAudit creates a new signer for audit orders.
+func NewSignerAudit(service *Service, rootPieceID storj.PieceID, orderCreation time.Time, orderExpiration time.Time, pieceSize int64) (*Signer, error) {
+	return NewSigner(service, rootPieceID, time.Time{}, orderCreation, orderExpiration, pieceSize, pb.PieceAction_GET_AUDIT)
+}
+
+// NewSignerGracefulExit creates a new signer for graceful exit orders.
+func NewSignerGracefulExit(service *Service, rootPieceID storj.PieceID, orderCreation time.Time, orderExpiration time.Time, shareSize int32) (*Signer, error) {
+	// TODO: we're using zero time.Time for piece expiration for some reason.
+
+	// TODO: we're using `PUT_REPAIR` here even though `PUT_GRACEFUL_EXIT` exists and
+	// seems like the perfect thing because we're in a pickle. we can't use `PUT`
+	// because we don't want to charge bucket owners for graceful exit bandwidth, and
+	// we can't use `PUT_GRACEFUL_EXIT` because storagenode will only accept upload
+	// orders with `PUT` or `PUT_REPAIR` as the action. we also don't have a bunch of
+	// supporting code/tables to aggregate `PUT_GRACEFUL_EXIT` bandwidth into our rollups
+	// and stuff. so, for now, we just use `PUT_REPAIR` because it's the least bad of
+	// our options. this should be fixed.
+	return NewSigner(service, rootPieceID, time.Time{}, orderCreation, orderExpiration, int64(shareSize), pb.PieceAction_PUT_REPAIR)
+}
+
+// Sign signs an order limit for the specified node.
+func (signer *Signer) Sign(ctx context.Context, node storj.NodeURL, pieceNum int32) (_ *pb.AddressedOrderLimit, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	limit := &pb.OrderLimit{
+		SerialNumber:    signer.Serial,
+		SatelliteId:     signer.Service.satellite.ID(),
+		UplinkPublicKey: signer.PublicKey,
+		StorageNodeId:   node.ID,
+
+		PieceId: signer.RootPieceID.Derive(node.ID, pieceNum),
+		Limit:   signer.Limit,
+		Action:  signer.Action,
+
+		PieceExpiration: signer.PieceExpiration,
+		OrderCreation:   signer.OrderCreation,
+		OrderExpiration: signer.OrderExpiration,
+
+		SatelliteAddress: signer.Service.satelliteAddress,
+	}
+
+	signedLimit, err := signing.SignOrderLimit(ctx, signer.Service.satellite, limit)
+	if err != nil {
+		return nil, ErrSigner.Wrap(err)
+	}
+
+	addressedLimit := &pb.AddressedOrderLimit{
+		Limit: signedLimit,
+		StorageNodeAddress: &pb.NodeAddress{
+			Address: node.Address,
+		},
+	}
+
+	signer.AddressedLimits = append(signer.AddressedLimits, addressedLimit)
+
+	return addressedLimit, nil
+}
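Taken together: every AddressedOrderLimit produced by one Signer shares the same serial, key pair, creation and expiration times; only the node ID, address, and derived piece ID vary. A minimal sketch (hypothetical helper; VerifyOrderLimitSignature is the service method shown in the first hunk, assumed to accept the signed *pb.OrderLimit) of signing one limit and checking it:

func signAndVerify(ctx context.Context, service *Service, signer *Signer, node storj.NodeURL, pieceNum int32) error {
	addressed, err := signer.Sign(ctx, node, pieceNum)
	if err != nil {
		return err
	}
	// The signed limit carries the satellite's signature and can be checked
	// against the satellite identity.
	return service.VerifyOrderLimitSignature(ctx, addressed.Limit)
}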