2019-03-27 10:24:35 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package orders
|
|
|
|
|
|
|
|
import (
|
2020-01-15 21:45:17 +00:00
|
|
|
"bytes"
|
2019-03-27 10:24:35 +00:00
|
|
|
"context"
|
|
|
|
"io"
|
2020-01-15 21:45:17 +00:00
|
|
|
"sort"
|
2019-03-27 10:24:35 +00:00
|
|
|
"time"
|
|
|
|
|
2020-03-27 03:21:35 +00:00
|
|
|
monkit "github.com/spacemonkeygo/monkit/v3"
|
2019-03-27 10:24:35 +00:00
|
|
|
"github.com/zeebo/errs"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/identity"
|
|
|
|
"storj.io/common/pb"
|
2020-02-26 13:59:46 +00:00
|
|
|
"storj.io/common/pb/pbgrpc"
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/rpc/rpcstatus"
|
|
|
|
"storj.io/common/signing"
|
|
|
|
"storj.io/common/storj"
|
2020-03-30 10:08:50 +01:00
|
|
|
"storj.io/common/uuid"
|
2019-03-27 10:24:35 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// DB implements saving order after receiving from storage node.
//
// architecture: Database
type DB interface {
	// CreateSerialInfo creates serial number entry in database.
	CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) error
	// UseSerialNumber creates a used serial number entry in database from an
	// existing serial number.
	// It returns the bucket ID associated to serialNumber.
	UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) ([]byte, error)
	// UnuseSerialNumber removes pair serial number -> storage node id from database.
	UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error
	// DeleteExpiredSerials deletes all expired serials in serial_number, used_serials, and consumed_serials table.
	// It returns the number of deleted entries.
	DeleteExpiredSerials(ctx context.Context, now time.Time) (_ int, err error)
	// DeleteExpiredConsumedSerials deletes all expired serials in the consumed_serials table.
	// It returns the number of deleted entries.
	DeleteExpiredConsumedSerials(ctx context.Context, now time.Time) (_ int, err error)

	// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket.
	UpdateBucketBandwidthAllocation(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error
	// UpdateBucketBandwidthSettle updates 'settled' bandwidth for given bucket.
	UpdateBucketBandwidthSettle(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error
	// UpdateBucketBandwidthInline updates 'inline' bandwidth for given bucket.
	UpdateBucketBandwidthInline(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error

	// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node.
	UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error

	// GetBucketBandwidth gets total bucket bandwidth from period of time.
	GetBucketBandwidth(ctx context.Context, projectID uuid.UUID, bucketName []byte, from, to time.Time) (int64, error)
	// GetStorageNodeBandwidth gets total storage node bandwidth from period of time.
	GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from, to time.Time) (int64, error)

	// ProcessOrders takes a list of order requests and processes them in a batch,
	// returning one response per request.
	ProcessOrders(ctx context.Context, requests []*ProcessOrderRequest) (responses []*ProcessOrderResponse, err error)

	// WithTransaction runs the callback and provides it with a Transaction.
	WithTransaction(ctx context.Context, cb func(ctx context.Context, tx Transaction) error) error
	// WithQueue runs the callback and provides it with a Queue. When the callback returns with
	// no error, any pending serials returned by the queue are removed from it.
	WithQueue(ctx context.Context, cb func(ctx context.Context, queue Queue) error) error
}
|
|
|
|
|
2020-01-15 21:45:17 +00:00
|
|
|
// Transaction represents a database transaction but with higher level actions.
type Transaction interface {
	// UpdateBucketBandwidthBatch updates all the bandwidth rollups in the database.
	UpdateBucketBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []BucketBandwidthRollup) error

	// UpdateStoragenodeBandwidthBatch updates all the bandwidth rollups in the database.
	UpdateStoragenodeBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []StoragenodeBandwidthRollup) error

	// CreateConsumedSerialsBatch creates the batch of ConsumedSerials.
	CreateConsumedSerialsBatch(ctx context.Context, consumedSerials []ConsumedSerial) (err error)

	// HasConsumedSerial returns true if the node and serial number have been consumed.
	HasConsumedSerial(ctx context.Context, nodeID storj.NodeID, serialNumber storj.SerialNumber) (bool, error)
}
|
|
|
|
|
2020-03-02 18:47:22 +00:00
|
|
|
// Queue is an abstraction around a queue of pending serials.
type Queue interface {
	// GetPendingSerialsBatch returns a batch of pending serials containing at most size
	// entries. It returns a boolean indicating true if the queue is empty.
	GetPendingSerialsBatch(ctx context.Context, size int) ([]PendingSerial, bool, error)
}
|
|
|
|
|
2020-03-02 18:47:22 +00:00
|
|
|
// ConsumedSerial is a serial that has been consumed and its bandwidth recorded.
type ConsumedSerial struct {
	// NodeID is the id of the storage node the serial was consumed with.
	NodeID storj.NodeID
	// SerialNumber is the consumed serial number.
	SerialNumber storj.SerialNumber
	// ExpiresAt is when this entry expires (see DB.DeleteExpiredConsumedSerials).
	ExpiresAt time.Time
}
|
|
|
|
|
|
|
|
// PendingSerial is a serial number reported by a storagenode waiting to be
// settled.
type PendingSerial struct {
	// NodeID is the id of the storage node that reported the serial.
	NodeID storj.NodeID
	// BucketID identifies the bucket the serial was issued for.
	BucketID []byte
	// Action is the piece action associated with the order.
	Action uint
	// SerialNumber is the serial number waiting to be settled.
	SerialNumber storj.SerialNumber
	// ExpiresAt is when the pending serial expires.
	ExpiresAt time.Time
	// Settled is the settled amount reported for this serial.
	Settled uint64
}
|
|
|
|
|
|
|
|
var (
	// Error the default orders errs class.
	Error = errs.Class("orders error")
	// ErrUsingSerialNumber error class for serial number.
	ErrUsingSerialNumber = errs.Class("serial number")

	// errExpiredOrder identifies rejections caused by an expired order limit so
	// they can be counted without logging each one (see doSettlement).
	errExpiredOrder = errs.Class("order limit expired")

	// mon provides package-level monkit instrumentation.
	mon = monkit.Package()
)
|
|
|
|
|
2020-01-15 21:45:17 +00:00
|
|
|
// BucketBandwidthRollup contains all the info needed for a bucket bandwidth rollup.
type BucketBandwidthRollup struct {
	// ProjectID is the project that owns the bucket.
	ProjectID uuid.UUID
	// BucketName is the name of the bucket.
	BucketName string
	// Action is the piece action this rollup is for.
	Action pb.PieceAction
	// Inline is the inline bandwidth amount.
	Inline int64
	// Allocated is the allocated bandwidth amount.
	Allocated int64
	// Settled is the settled bandwidth amount.
	Settled int64
}
|
|
|
|
|
|
|
|
// SortBucketBandwidthRollups sorts the rollups
|
|
|
|
func SortBucketBandwidthRollups(rollups []BucketBandwidthRollup) {
|
|
|
|
sort.SliceStable(rollups, func(i, j int) bool {
|
|
|
|
uuidCompare := bytes.Compare(rollups[i].ProjectID[:], rollups[j].ProjectID[:])
|
|
|
|
switch {
|
|
|
|
case uuidCompare == -1:
|
|
|
|
return true
|
|
|
|
case uuidCompare == 1:
|
|
|
|
return false
|
|
|
|
case rollups[i].BucketName < rollups[j].BucketName:
|
|
|
|
return true
|
|
|
|
case rollups[i].BucketName > rollups[j].BucketName:
|
|
|
|
return false
|
|
|
|
case rollups[i].Action < rollups[j].Action:
|
|
|
|
return true
|
|
|
|
case rollups[i].Action > rollups[j].Action:
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// StoragenodeBandwidthRollup contains all the info needed for a storagenode bandwidth rollup.
type StoragenodeBandwidthRollup struct {
	// NodeID is the storage node the rollup is for.
	NodeID storj.NodeID
	// Action is the piece action this rollup is for.
	Action pb.PieceAction
	// Allocated is the allocated bandwidth amount.
	Allocated int64
	// Settled is the settled bandwidth amount.
	Settled int64
}
|
|
|
|
|
|
|
|
// SortStoragenodeBandwidthRollups sorts the rollups
|
|
|
|
func SortStoragenodeBandwidthRollups(rollups []StoragenodeBandwidthRollup) {
|
|
|
|
sort.SliceStable(rollups, func(i, j int) bool {
|
|
|
|
nodeCompare := bytes.Compare(rollups[i].NodeID.Bytes(), rollups[j].NodeID.Bytes())
|
|
|
|
switch {
|
|
|
|
case nodeCompare == -1:
|
|
|
|
return true
|
|
|
|
case nodeCompare == 1:
|
|
|
|
return false
|
|
|
|
case rollups[i].Action < rollups[j].Action:
|
|
|
|
return true
|
|
|
|
case rollups[i].Action > rollups[j].Action:
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-08-15 20:05:43 +01:00
|
|
|
// ProcessOrderRequest for batch order processing.
type ProcessOrderRequest struct {
	// Order is the signed order from the uplink.
	Order *pb.Order
	// OrderLimit is the order limit the satellite signed for the order.
	OrderLimit *pb.OrderLimit
}
|
|
|
|
|
2019-08-19 14:36:11 +01:00
|
|
|
// ProcessOrderResponse for batch order processing responses.
type ProcessOrderResponse struct {
	// SerialNumber identifies which order request this response is for.
	SerialNumber storj.SerialNumber
	// Status reports whether the settlement was accepted or rejected.
	Status pb.SettlementResponse_Status
}
|
|
|
|
|
2019-03-27 10:24:35 +00:00
|
|
|
// Endpoint for orders receiving.
//
// architecture: Endpoint
type Endpoint struct {
	// log is the endpoint logger; per-peer children are derived in doSettlement.
	log *zap.Logger
	// satelliteSignee is used to verify that order limits were signed by this satellite.
	satelliteSignee signing.Signee
	// DB persists processed orders.
	DB DB
	// settlementBatchSize is how many order requests are buffered before a
	// batched call to DB.ProcessOrders.
	settlementBatchSize int
}
|
|
|
|
|
2019-09-12 22:09:46 +01:00
|
|
|
// drpcEndpoint wraps streaming methods so that they can be used with drpc.
type drpcEndpoint struct{ *Endpoint }
|
|
|
|
|
|
|
|
// DRPC returns a DRPC form of the endpoint.
func (endpoint *Endpoint) DRPC() pb.DRPCOrdersServer { return &drpcEndpoint{Endpoint: endpoint} }
|
|
|
|
|
2019-03-27 10:24:35 +00:00
|
|
|
// NewEndpoint new orders receiving endpoint.
//
// settlementBatchSize controls how many incoming order requests are buffered
// before being flushed to the database in one ProcessOrders call.
func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, settlementBatchSize int) *Endpoint {
	return &Endpoint{
		log:                 log,
		satelliteSignee:     satelliteSignee,
		DB:                  db,
		settlementBatchSize: settlementBatchSize,
	}
}
|
|
|
|
|
2019-09-12 22:09:46 +01:00
|
|
|
// monitoredSettlementStreamReceive wraps stream.Recv so the time spent waiting
// for the next settlement request is captured as a monkit task.
func monitoredSettlementStreamReceive(ctx context.Context, stream settlementStream) (_ *pb.SettlementRequest, err error) {
	defer mon.Task()(&ctx)(&err)
	return stream.Recv()
}
|
|
|
|
|
2019-09-12 22:09:46 +01:00
|
|
|
func monitoredSettlementStreamSend(ctx context.Context, stream settlementStream, resp *pb.SettlementResponse) (err error) {
|
2019-06-05 16:43:41 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
switch resp.Status {
|
|
|
|
case pb.SettlementResponse_ACCEPTED:
|
|
|
|
mon.Event("settlement_response_accepted")
|
|
|
|
case pb.SettlementResponse_REJECTED:
|
|
|
|
mon.Event("settlement_response_rejected")
|
|
|
|
default:
|
|
|
|
mon.Event("settlement_response_unknown")
|
|
|
|
}
|
|
|
|
return stream.Send(resp)
|
|
|
|
}
|
|
|
|
|
2019-08-19 14:36:11 +01:00
|
|
|
// Settlement receives orders and handles them in batches.
func (endpoint *Endpoint) Settlement(stream pbgrpc.Orders_SettlementServer) (err error) {
	// Delegate to the transport-agnostic implementation; the grpc stream
	// satisfies the settlementStream interface.
	return endpoint.doSettlement(stream)
}
|
|
|
|
|
|
|
|
// Settlement receives orders and handles them in batches.
func (endpoint *drpcEndpoint) Settlement(stream pb.DRPCOrders_SettlementStream) (err error) {
	// Delegate to the transport-agnostic implementation; the drpc stream
	// satisfies the settlementStream interface.
	return endpoint.doSettlement(stream)
}
|
|
|
|
|
|
|
|
// settlementStream is the minimum interface required to perform settlements.
// Both the grpc and drpc settlement streams satisfy it.
type settlementStream interface {
	// Context returns the context for the stream.
	Context() context.Context
	// Send sends a settlement response to the storage node.
	Send(*pb.SettlementResponse) error
	// Recv receives the next settlement request from the storage node.
	Recv() (*pb.SettlementRequest, error)
}
|
|
|
|
|
|
|
|
// doSettlement receives orders and handles them in batches.
//
// It reads settlement requests from the stream until the stream ends (io.EOF,
// which is translated to a nil error) or a fatal error occurs. Verified orders
// are buffered and flushed to the database whenever settlementBatchSize is
// reached; a deferred flush handles any remainder when the loop exits.
func (endpoint *Endpoint) doSettlement(stream settlementStream) (err error) {
	ctx := stream.Context()
	defer mon.Task()(&ctx)(&err)

	// Only an authenticated storage node may settle its own orders.
	peer, err := identity.PeerIdentityFromContext(ctx)
	if err != nil {
		return rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
	}

	// formatError maps stream errors for the caller: a clean end-of-stream is
	// success, everything else becomes an Unknown rpc status.
	formatError := func(err error) error {
		if err == io.EOF {
			return nil
		}
		return rpcstatus.Error(rpcstatus.Unknown, err.Error())
	}

	log := endpoint.log.Named(peer.ID.String())
	log.Debug("Settlement")

	requests := make([]*ProcessOrderRequest, 0, endpoint.settlementBatchSize)

	// Flush any partially-filled batch when the function returns, combining a
	// flush failure with whatever error caused the exit.
	defer func() {
		if len(requests) > 0 {
			err = errs.Combine(err, endpoint.processOrders(ctx, stream, requests))
			if err != nil {
				err = formatError(err)
			}
		}
	}()

	// Expired orders are common enough that they are counted and logged once
	// per stream rather than logged individually. NOTE: this defer is
	// registered after the flush defer above, so it runs before it.
	var expirationCount int64
	defer func() {
		if expirationCount > 0 {
			log.Debug("order verification found expired orders", zap.Int64("amount", expirationCount))
		}
	}()

	for {
		request, err := monitoredSettlementStreamReceive(ctx, stream)
		if err != nil {
			return formatError(err)
		}

		// Malformed requests terminate the whole stream with InvalidArgument.
		if request == nil {
			return rpcstatus.Error(rpcstatus.InvalidArgument, "request missing")
		}
		if request.Limit == nil {
			return rpcstatus.Error(rpcstatus.InvalidArgument, "order limit missing")
		}
		if request.Order == nil {
			return rpcstatus.Error(rpcstatus.InvalidArgument, "order missing")
		}

		orderLimit := request.Limit
		order := request.Order

		// A node may only settle orders addressed to itself.
		if orderLimit.StorageNodeId != peer.ID {
			return rpcstatus.Error(rpcstatus.Unauthenticated, "only specified storage node can settle order")
		}

		// Verify the order; a non-nil rejectErr rejects this single order but
		// keeps the stream alive.
		rejectErr := func() error {
			// check expiration first before the signatures so that we can throw out the large
			// amount of expired orders being sent to us before doing expensive signature
			// verification.
			if orderLimit.OrderExpiration.Before(time.Now()) {
				mon.Event("order_verification_failed_expired")
				expirationCount++
				return errExpiredOrder.New("order limit expired")
			}

			// satellite verifies that it signed the order limit
			if err := signing.VerifyOrderLimitSignature(ctx, endpoint.satelliteSignee, orderLimit); err != nil {
				mon.Event("order_verification_failed_satellite_signature")
				return Error.New("unable to verify order limit")
			}

			// satellite verifies that the order signature matches pub key in order limit
			if err := signing.VerifyUplinkOrderSignature(ctx, orderLimit.UplinkPublicKey, order); err != nil {
				mon.Event("order_verification_failed_uplink_signature")
				return Error.New("unable to verify order")
			}

			// TODO should this reject or just error ??
			if orderLimit.SerialNumber != order.SerialNumber {
				mon.Event("order_verification_failed_serial_mismatch")
				return Error.New("invalid serial number")
			}
			return nil
		}()
		if rejectErr != nil {
			mon.Event("order_verification_failed")
			// Expired orders are only tallied (logged once via the defer above);
			// other verification failures are logged per order.
			if !errExpiredOrder.Has(rejectErr) {
				log.Debug("order limit/order verification failed", zap.Stringer("serial", orderLimit.SerialNumber), zap.Error(rejectErr))
			}
			err := monitoredSettlementStreamSend(ctx, stream, &pb.SettlementResponse{
				SerialNumber: orderLimit.SerialNumber,
				Status:       pb.SettlementResponse_REJECTED,
			})
			if err != nil {
				return formatError(err)
			}
			continue
		}

		requests = append(requests, &ProcessOrderRequest{Order: order, OrderLimit: orderLimit})

		// Flush a full batch. The slice is truncated before checking err so the
		// deferred flush does not re-process the same requests on failure.
		if len(requests) >= endpoint.settlementBatchSize {
			err = endpoint.processOrders(ctx, stream, requests)
			requests = requests[:0]
			if err != nil {
				return formatError(err)
			}
		}
	}
}
|
2019-04-01 21:14:58 +01:00
|
|
|
|
2019-09-12 22:09:46 +01:00
|
|
|
func (endpoint *Endpoint) processOrders(ctx context.Context, stream settlementStream, requests []*ProcessOrderRequest) (err error) {
|
2019-08-15 20:05:43 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-02-14 00:03:41 +00:00
|
|
|
responses, err := endpoint.DB.ProcessOrders(ctx, requests)
|
2019-08-15 20:05:43 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-03-27 10:24:35 +00:00
|
|
|
|
2019-08-15 20:05:43 +01:00
|
|
|
for _, response := range responses {
|
2019-08-19 14:36:11 +01:00
|
|
|
r := &pb.SettlementResponse{
|
|
|
|
SerialNumber: response.SerialNumber,
|
|
|
|
Status: response.Status,
|
|
|
|
}
|
|
|
|
err = monitoredSettlementStreamSend(ctx, stream, r)
|
2019-03-27 10:24:35 +00:00
|
|
|
if err != nil {
|
2019-08-15 20:05:43 +01:00
|
|
|
return err
|
2019-03-27 10:24:35 +00:00
|
|
|
}
|
|
|
|
}
|
2019-08-15 20:05:43 +01:00
|
|
|
return nil
|
2019-03-27 10:24:35 +00:00
|
|
|
}
|