satellite/metainfo: fix for getting real RS values

Change-Id: Ib2cd01b2a68baab9f342dc0ff1ab8e5f12f4557f
This commit is contained in:
Michał Niewrzał 2021-02-16 16:36:09 +01:00
parent 5dd76522af
commit 6ebe06cd1b
4 changed files with 94 additions and 8 deletions

View File

@ -876,7 +876,31 @@ func (endpoint *Endpoint) getObject(ctx context.Context, projectID uuid.UUID, bu
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
object, err := endpoint.objectToProto(ctx, metaObject)
rs := endpoint.defaultRS
if metaObject.SegmentCount > 0 {
segment, err := endpoint.metainfo.metabaseDB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
StreamID: metaObject.StreamID,
Position: metabase.SegmentPosition{
Index: 0,
},
})
if err != nil {
// don't fail because it's possible that it's a multipart object
endpoint.log.Error("internal", zap.Error(err))
} else {
rs = &pb.RedundancyScheme{
Type: pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
ErasureShareSize: segment.Redundancy.ShareSize,
MinReq: int32(segment.Redundancy.RequiredShares),
RepairThreshold: int32(segment.Redundancy.RepairShares),
SuccessThreshold: int32(segment.Redundancy.OptimalShares),
Total: int32(segment.Redundancy.TotalShares),
}
}
}
object, err := endpoint.objectToProto(ctx, metaObject, rs)
if err != nil {
endpoint.log.Error("internal", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
@ -2048,7 +2072,7 @@ func (endpoint *Endpoint) deleteObjectsPieces(ctx context.Context, result metaba
deletedObjects = make([]*pb.Object, len(result.Objects))
for i, object := range result.Objects {
deletedObject, err := endpoint.objectToProto(ctx, object)
deletedObject, err := endpoint.objectToProto(ctx, object, endpoint.defaultRS)
if err != nil {
return nil, err
}
@ -2080,7 +2104,7 @@ func (endpoint *Endpoint) deleteSegmentPieces(ctx context.Context, segments []me
}
}
func (endpoint *Endpoint) objectToProto(ctx context.Context, object metabase.Object) (*pb.Object, error) {
func (endpoint *Endpoint) objectToProto(ctx context.Context, object metabase.Object, rs *pb.RedundancyScheme) (*pb.Object, error) {
expires := time.Time{}
if object.ExpiresAt != nil {
expires = *object.ExpiresAt
@ -2096,9 +2120,7 @@ func (endpoint *Endpoint) objectToProto(ctx context.Context, object metabase.Obj
ExpirationDate: expires,
StreamId: object.StreamID[:],
MultipartObject: multipartObject,
// TODO: defaultRS may change while the upload is pending.
// Ideally, we should remove redundancy from satStreamID.
Redundancy: endpoint.defaultRS,
Redundancy: rs,
})
if err != nil {
return nil, err
@ -2159,8 +2181,7 @@ func (endpoint *Endpoint) objectToProto(ctx context.Context, object metabase.Obj
BlockSize: int64(object.Encryption.BlockSize),
},
// TODO extend DownloadSegment response to provide RS values for client
RedundancyScheme: endpoint.defaultRS,
RedundancyScheme: rs,
}
return result, nil

View File

@ -37,6 +37,12 @@ fi
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplink.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplink-share.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-billing.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplink-rs-upload.sh
# change RS values and try download
sed -i 's@# metainfo.rs: 4/6/8/10-256 B@metainfo.rs: 2/3/6/8-256 B@g' $(storj-sim network env SATELLITE_0_DIR)/config.yaml
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplink-rs-download.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network destroy
# setup the network with ipv6

View File

@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Downloads the object previously uploaded by test-uplink-rs-upload.sh, after
# the satellite's RS (redundancy scheme) configuration has been changed, to
# verify that download still works using the segment's original RS values.
set -ueo pipefail
# Scratch directory for the downloaded file; removed by the EXIT trap below.
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
cleanup(){
rm -rf "$TMPDIR"
echo "cleaned up test successfully"
}
trap cleanup EXIT
# Must match the bucket created by test-uplink-rs-upload.sh.
BUCKET=bucket-for-rs-change
SRC_DIR=$TMPDIR/source
DST_DIR=$TMPDIR/dst
UPLINK_DIR=$TMPDIR/uplink
mkdir -p "$SRC_DIR" "$DST_DIR"
# Empty debug address disables the uplink debug endpoint.
UPLINK_DEBUG_ADDR=""
# NOTE(review): GATEWAY_0_ACCESS is presumably injected by `storj-sim network
# test` (see test-sim.sh) — confirm against the test harness.
export STORJ_ACCESS=$GATEWAY_0_ACCESS
export STORJ_DEBUG_ADDR=$UPLINK_DEBUG_ADDR
# Download the file uploaded before the RS config change; failure here means
# the satellite returned wrong RS values for the existing segment.
uplink cp "sj://$BUCKET/big-upload-testfile" "$DST_DIR" --progress=false

View File

@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Uploads a test object with the satellite's initial RS (redundancy scheme)
# configuration; test-uplink-rs-download.sh later downloads it after the RS
# config is changed, verifying real per-segment RS values are honored.
set -ueo pipefail
# Scratch directory for the generated source file; removed by the EXIT trap.
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
cleanup(){
rm -rf "$TMPDIR"
echo "cleaned up test successfully"
}
trap cleanup EXIT
# Must match the bucket expected by test-uplink-rs-download.sh.
BUCKET=bucket-for-rs-change
SRC_DIR=$TMPDIR/source
DST_DIR=$TMPDIR/dst
UPLINK_DIR=$TMPDIR/uplink
mkdir -p "$SRC_DIR" "$DST_DIR"
# random_bytes_file SIZE PATH — writes SIZE bytes of random data to PATH.
random_bytes_file () {
size=$1
output=$2
head -c $size </dev/urandom > $output
}
random_bytes_file "1MiB" "$SRC_DIR/big-upload-testfile"
# Empty debug address disables the uplink debug endpoint.
UPLINK_DEBUG_ADDR=""
# NOTE(review): GATEWAY_0_ACCESS is presumably injected by `storj-sim network
# test` (see test-sim.sh) — confirm against the test harness.
export STORJ_ACCESS=$GATEWAY_0_ACCESS
export STORJ_DEBUG_ADDR=$UPLINK_DEBUG_ADDR
# Create the bucket and upload the test file under the current RS settings.
uplink mb "sj://$BUCKET/"
uplink cp "$SRC_DIR/big-upload-testfile" "sj://$BUCKET/" --progress=false