satellite/gracefulexit: make orders with the right bucket ID and action
Paths are organized as follows: project_id/segment_index/bucket_name/encrypted_key. By picking parts[0] and parts[1] we were using the segment index instead of the bucket name, causing bandwidth to be accounted for against the wrong bucket. Additionally, we were using the PUT action instead of the PUT_GRACEFUL_EXIT action, causing the data to be charged incorrectly. We use PUT_REPAIR for now because storage nodes won't accept uploads with PUT_GRACEFUL_EXIT and our tables need migrations to handle rollups with it.

Change-Id: Ife2aff541222bac930c35df8fcf76e8bac5d60b2
commit 26e33e7e07
parent 494fead7af
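The bucket-ID fix in the first hunk is easiest to see with the path layout spelled out. The following is a minimal standalone sketch (standard library only; it assumes storj.SplitPath and storj.JoinPaths behave like plain "/"-based split/join, and uses placeholder path components rather than real IDs) of why parts[1] picked up the segment index while parts[2] picks up the bucket name, and why the length check must now require at least three parts:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Placeholder path using the layout from the commit message:
	// project_id/segment_index/bucket_name/encrypted_key
	path := "project_id/segment_index/bucket_name/encrypted_key"

	// Assumption: storj.SplitPath is equivalent to a "/" split for this illustration.
	parts := strings.Split(path, "/")

	before := strings.Join([]string{parts[0], parts[1]}, "/") // "project_id/segment_index" -> wrong: segment index
	after := strings.Join([]string{parts[0], parts[2]}, "/")  // "project_id/bucket_name"   -> right: bucket name

	fmt.Println(before)
	fmt.Println(after)
}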
@@ -412,11 +412,11 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream processS
 	pieceID := remote.RootPieceId.Derive(nodeID, incomplete.PieceNum)
 
 	parts := storj.SplitPath(storj.Path(incomplete.Path))
-	if len(parts) < 2 {
+	if len(parts) < 3 {
 		return Error.New("invalid path for node ID %v, piece ID %v", incomplete.NodeID, pieceID)
 	}
 
-	bucketID := []byte(storj.JoinPaths(parts[0], parts[1]))
+	bucketID := []byte(storj.JoinPaths(parts[0], parts[2]))
 	limit, privateKey, err := endpoint.orders.CreateGracefulExitPutOrderLimit(ctx, bucketID, newNode.Id, incomplete.PieceNum, remote.RootPieceId, int32(pieceSize))
 	if err != nil {
 		return Error.Wrap(err)
@@ -747,6 +747,15 @@ func (service *Service) CreateGracefulExitPutOrderLimit(ctx context.Context, buc
 		return nil, storj.PiecePrivateKey{}, overlay.ErrNodeOffline.New("%v", nodeID)
 	}
 
+	// TODO: we're using `PUT_REPAIR` here even though `PUT_GRACEFUL_EXIT` exists and
+	// seems like the perfect thing because we're in a pickle. we can't use `PUT`
+	// because we don't want to charge bucket owners for graceful exit bandwidth, and
+	// we can't use `PUT_GRACEFUL_EXIT` because storagenode will only accept upload
+	// orders with `PUT` or `PUT_REPAIR` as the action. we also don't have a bunch of
+	// supporting code/tables to aggregate `PUT_GRACEFUL_EXIT` bandwidth into our rollups
+	// and stuff. so, for now, we just use `PUT_REPAIR` because it's the least bad of
+	// our options. this should be fixed.
+
 	orderLimit, err := signing.SignOrderLimit(ctx, service.satellite, &pb.OrderLimit{
 		SerialNumber:    serialNumber,
 		SatelliteId:     service.satellite.ID(),
@@ -754,7 +763,7 @@ func (service *Service) CreateGracefulExitPutOrderLimit(ctx context.Context, buc
 		UplinkPublicKey: piecePublicKey,
 		StorageNodeId:   nodeID,
 		PieceId:         rootPieceID.Derive(nodeID, pieceNum),
-		Action:          pb.PieceAction_PUT,
+		Action:          pb.PieceAction_PUT_REPAIR,
 		Limit:           int64(shareSize),
 		OrderCreation:   time.Now().UTC(),
 		OrderExpiration: orderExpiration,
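The TODO comment added in the second hunk carries the reasoning behind the action change. As a purely illustrative sketch of the constraint it describes (this is not the actual storagenode code; the action names are stand-ins mirroring the pb.PieceAction values), only PUT and PUT_REPAIR get through the upload path, which is why PUT_GRACEFUL_EXIT cannot be used yet:

package main

import "fmt"

// Illustrative stand-ins for the relevant pb.PieceAction values.
type pieceAction int

const (
	actionPut pieceAction = iota
	actionPutRepair
	actionPutGracefulExit
)

// Hypothetical form of the storagenode-side constraint described in the commit
// message: upload order limits are only honored for PUT and PUT_REPAIR.
func uploadActionAccepted(a pieceAction) bool {
	return a == actionPut || a == actionPutRepair
}

func main() {
	fmt.Println(uploadActionAccepted(actionPutRepair))       // true: what graceful exit uses for now
	fmt.Println(uploadActionAccepted(actionPutGracefulExit)) // false: why PUT_GRACEFUL_EXIT is deferred
}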