satellite/orders: delete unused code

Change-Id: I431c8cc2f23e538c676d6f742fb1faef7cc1d73e
Egon Elbre 2020-06-25 16:48:26 +03:00
parent db57e5cf7b
commit df8cf8f58a


@@ -126,100 +126,6 @@ func (service *Service) updateBandwidth(ctx context.Context, projectID uuid.UUID
	return nil
}

// CreateGetOrderLimitsOld creates the order limits for downloading the pieces of pointer for backwards compatibility
func (service *Service) CreateGetOrderLimitsOld(ctx context.Context, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
	defer mon.Task()(&ctx)(&err)

	rootPieceID := pointer.GetRemote().RootPieceId
	pieceExpiration := pointer.ExpirationDate
	orderExpiration := time.Now().Add(service.orderExpiration)
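
	// Generate a one-time piece key pair: the public key is embedded in each
	// signed order limit, the private key is returned to the caller.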
	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
	if err != nil {
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}
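
	// Reserve a serial number for this download; it is embedded in every
	// order limit and persisted below via saveSerial.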
	serialNumber, err := service.createSerial(ctx, orderExpiration)
	if err != nil {
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}
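
	// Rebuild the redundancy scheme from the pointer so each piece can be
	// sized and the minimum piece count for a download is known.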
	redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
	if err != nil {
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}

	pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
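
	// Collect the IDs of all piece-holding nodes and look them up in the
	// overlay in one batch, keeping only nodes that are currently online.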
	nodeIDs := make([]storj.NodeID, len(pointer.GetRemote().GetRemotePieces()))
	for i, piece := range pointer.GetRemote().GetRemotePieces() {
		nodeIDs[i] = piece.NodeId
	}

	nodes, err := service.overlay.GetOnlineNodesForGetDelete(ctx, nodeIDs)
	if err != nil {
		service.log.Debug("error getting nodes from overlay", zap.Error(err))
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}
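
	// Sign one order limit per piece whose node was found online; offline or
	// unknown nodes are collected as errors instead of failing immediately.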
	var nodeErrors errs.Group
	var limits []*pb.AddressedOrderLimit
	for _, piece := range pointer.GetRemote().GetRemotePieces() {
		node, ok := nodes[piece.NodeId]
		if !ok {
			nodeErrors.Add(errs.New("node %q is not reliable", piece.NodeId))
			continue
		}

		orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
			SerialNumber:     serialNumber,
			SatelliteId:      service.satellite.ID(),
			SatelliteAddress: service.satelliteAddress,
			UplinkPublicKey:  piecePublicKey,
			StorageNodeId:    piece.NodeId,
			PieceId:          rootPieceID.Derive(piece.NodeId, piece.PieceNum),
			Action:           pb.PieceAction_GET,
			Limit:            pieceSize,
			PieceExpiration:  pieceExpiration,
			OrderCreation:    time.Now(),
			OrderExpiration:  orderExpiration,
		})
		if err != nil {
			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
		}

		// use the lastIP that we have on record to avoid doing extra DNS resolutions
		if node.LastIPPort != "" {
			node.Address.Address = node.LastIPPort
		}

		limits = append(limits, &pb.AddressedOrderLimit{
			Limit:              orderLimit,
			StorageNodeAddress: node.Address,
		})
	}
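
	// A download needs at least RequiredCount pieces; if too few limits could
	// be created, fail and surface the per-node errors.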
	if len(limits) < redundancy.RequiredCount() {
		mon.Meter("download_failed_not_enough_pieces_uplink").Mark(1) //locked
		err = Error.New("not enough nodes available: got %d, required %d", len(limits), redundancy.RequiredCount())
		return nil, storj.PiecePrivateKey{}, ErrDownloadFailedNotEnoughPieces.Wrap(errs.Combine(err, nodeErrors.Err()))
	}

	err = service.saveSerial(ctx, serialNumber, bucketID, orderExpiration)
	if err != nil {
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}
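
	// Attribute the projected bandwidth usage of these limits to the bucket's project.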
	projectID, bucketName, err := SplitBucketID(bucketID)
	if err != nil {
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}
	if err := service.updateBandwidth(ctx, projectID, bucketName, limits...); err != nil {
		return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
	}

	return limits, piecePrivateKey, nil
}

// CreateGetOrderLimits creates the order limits for downloading the pieces of pointer.
func (service *Service) CreateGetOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer) (_ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
	defer mon.Task()(&ctx)(&err)