#!/bin/bash
# End-to-end test for captplanet: round-trips files through the S3 gateway
# and verifies downloads match uploads.
set -ueo pipefail

go install -v storj.io/storj/cmd/captplanet

captplanet setup --overwrite

# Run captplanet briefly, hard-kill it, then start it again: this
# reproduces kademlia restart problems. See V3-526.
captplanet run &
CAPT_PID=$!
sleep 5
kill -9 "$CAPT_PID"
# Reap the killed process so the port is released before restarting.
wait "$CAPT_PID" || true

captplanet run &
CAPT_PID=$!
# Point the aws CLI at the local captplanet dev credentials.
aws configure set aws_access_key_id insecure-dev-access-key
aws configure set aws_secret_access_key insecure-dev-secret-key
aws configure set default.region us-east-1

# Create random test files. Sizes chosen so the small file is stored
# inline and the others as remote segments.
head -c 1024 </dev/urandom > ./small-upload-testfile     # 1 KiB (inline)
head -c 5120 </dev/urandom > ./big-upload-testfile       # 5 KiB (remote)
# Must exceed the 4KB multipart_threshold set below so the upload
# actually goes through the multipart code path.
head -c 5120 </dev/urandom > ./multipart-upload-testfile # 5 KiB (remote, multipart)
aws s3 --endpoint=http://localhost:7777/ mb s3://bucket

# Raise the threshold so the small/big files upload as single parts.
aws configure set default.s3.multipart_threshold 1TB
aws s3 --endpoint=http://localhost:7777/ cp ./small-upload-testfile s3://bucket/small-testfile
aws s3 --endpoint=http://localhost:7777/ cp ./big-upload-testfile s3://bucket/big-testfile

# Lower the threshold so the multipart file exercises multipart upload.
aws configure set default.s3.multipart_threshold 4KB
aws s3 --endpoint=http://localhost:7777/ cp ./multipart-upload-testfile s3://bucket/multipart-testfile

aws s3 --endpoint=http://localhost:7777/ ls s3://bucket

# Download everything back for byte-for-byte comparison.
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/small-testfile ./small-download-testfile
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/big-testfile ./big-download-testfile
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/multipart-testfile ./multipart-download-testfile

aws s3 --endpoint=http://localhost:7777/ rb s3://bucket --force
# Verify the small (inline) file survived the round trip intact.
if ! cmp ./small-upload-testfile ./small-download-testfile; then
  echo "Downloaded file does not match uploaded file"
  kill -9 $CAPT_PID
  exit 1
fi
echo "Downloaded file matches uploaded file"
# Verify the big (remote) file survived the round trip intact.
if cmp ./big-upload-testfile ./big-download-testfile
then
  echo "Downloaded file matches uploaded file"
else
  echo "Downloaded file does not match uploaded file"
  kill -9 "$CAPT_PID"
  exit 1
fi
# Verify the multipart-uploaded file survived the round trip intact.
if ! cmp ./multipart-upload-testfile ./multipart-download-testfile; then
  echo "Downloaded file does not match uploaded file"
  kill -9 $CAPT_PID
  exit 1
fi
echo "Downloaded file matches uploaded file"
# Repeat a basic upload/download round trip with captplanet listening on
# an IPv6 loopback address.
kill -9 "$CAPT_PID"
captplanet setup --listen-host ::1 --overwrite
captplanet run &
CAPT_PID=$!
# NOTE(review): no sleep before first use here — presumably the aws CLI
# retries until the gateway is up; confirm if this flakes.
aws s3 --endpoint=http://localhost:7777/ mb s3://bucket
aws s3 --endpoint=http://localhost:7777/ cp ./big-upload-testfile s3://bucket/big-testfile
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/big-testfile ./big-download-testfile-ipv6
aws s3 --endpoint=http://localhost:7777/ rb s3://bucket --force
if cmp ./big-upload-testfile ./big-download-testfile-ipv6
then
  echo "Downloaded ipv6 file matches uploaded file"
else
  echo "Downloaded ipv6 file does not match uploaded file"
  kill -9 "$CAPT_PID"
  exit 1
fi
kill -9 "$CAPT_PID"
# Remove local test artifacts. All downloaded copies exist at this point
# because every cmp above passed (mismatches exit early).
rm -- \
  small-upload-testfile small-download-testfile \
  big-upload-testfile big-download-testfile \
  multipart-upload-testfile multipart-download-testfile \
  big-download-testfile-ipv6