Test all-in-one (#900)

* Add test for aio

* Don't trust the user to have images built for a version

* Make travis run the aio test

* Add missing values to docker-compose, sort some things, consider the gateway image

* today's changes

* config changed, again

* more fixes

* Expose satellite port on localhost:7778

* Add retries and a timeout around the big-testfile test in AIO

* Another config value changed

* Make this error message a little more useful

* Fix nil condition

Matt Robinson 2019-01-03 14:54:27 -05:00 committed by GitHub
parent 9fa874459a
commit c0e6b62708
7 changed files with 166 additions and 22 deletions


@@ -1,2 +1,4 @@
/*.swp
/release
/scripts
docker-compose.yaml


@@ -73,6 +73,15 @@ matrix:
script:
- make test-captplanet
### All-In-One tests ###
- env: MODE=integration
services:
- docker
install:
- source scripts/install-awscli.sh
script:
- make test-all-in-one
### windows tests ###
- env: MODE=windows-tests
if: commit_message =~ /(?i:windows)/


@@ -84,11 +84,15 @@ test-docker: ## Run tests in Docker
.PHONY: all-in-one
all-in-one: ## Deploy docker images with one storagenode locally
if [ -z "${VERSION}" ]; then \
$(MAKE) satellite-image storagenode-image gateway-image -j 3 \
&& export VERSION="${TAG}"; \
fi \
&& docker-compose up storagenode satellite gateway
export VERSION="${TAG}${CUSTOMTAG}" \
&& $(MAKE) satellite-image storagenode-image gateway-image \
&& docker-compose up --scale storagenode=1 satellite gateway
.PHONY: test-all-in-one
test-all-in-one: ## Test docker images locally
export VERSION="${TAG}${CUSTOMTAG}" \
&& $(MAKE) satellite-image storagenode-image gateway-image \
&& ./scripts/test-aio.sh
##@ Build
@@ -98,16 +102,16 @@ images: satellite-image storagenode-image uplink-image gateway-image ## Build ga
.PHONY: gateway-image
gateway-image: ## Build gateway Docker image
${DOCKER_BUILD} -t storjlabs/gateway:${TAG}${CUSTOMTAG} -f cmd/gateway/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/gateway:${TAG}${CUSTOMTAG} -f cmd/gateway/Dockerfile .
.PHONY: satellite-image
satellite-image: ## Build satellite Docker image
${DOCKER_BUILD} -t storjlabs/satellite:${TAG}${CUSTOMTAG} -f cmd/satellite/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/satellite:${TAG}${CUSTOMTAG} -f cmd/satellite/Dockerfile .
.PHONY: storagenode-image
storagenode-image: ## Build storagenode Docker image
${DOCKER_BUILD} -t storjlabs/storagenode:${TAG}${CUSTOMTAG} -f cmd/storagenode/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/storagenode:${TAG}${CUSTOMTAG} -f cmd/storagenode/Dockerfile .
.PHONY: uplink-image
uplink-image: ## Build uplink Docker image
${DOCKER_BUILD} -t storjlabs/uplink:${TAG}${CUSTOMTAG} -f cmd/uplink/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/uplink:${TAG}${CUSTOMTAG} -f cmd/uplink/Dockerfile .
.PHONY: binary
binary: CUSTOMTAG = -${GOOS}-${GOARCH}
@@ -173,6 +177,9 @@ push-images: ## Push Docker images to Docker Hub (jenkins)
docker tag storjlabs/uplink:${TAG} storjlabs/uplink:latest
docker push storjlabs/uplink:${TAG}
docker push storjlabs/uplink:latest
docker tag storjlabs/gateway:${TAG} storjlabs/gateway:latest
docker push storjlabs/gateway:${TAG}
docker push storjlabs/gateway:latest
.PHONY: binaries-upload
binaries-upload: ## Upload binaries to Google Storage (jenkins)
@@ -190,11 +197,13 @@ binaries-clean: ## Remove all local release binaries (jenkins)
.PHONY: clean-images
ifeq (${BRANCH},master)
clean-images: ## Remove Docker images from local engine
-docker rmi storjlabs/gateway:${TAG} storjlabs/gateway:latest
-docker rmi storjlabs/satellite:${TAG} storjlabs/satellite:latest
-docker rmi storjlabs/storagenode:${TAG} storjlabs/storagenode:latest
-docker rmi storjlabs/uplink:${TAG} storjlabs/uplink:latest
else
clean-images:
-docker rmi storjlabs/gateway:${TAG}
-docker rmi storjlabs/satellite:${TAG}
-docker rmi storjlabs/storagenode:${TAG}
-docker rmi storjlabs/uplink:${TAG}
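
With these changes, the all-in-one targets always rebuild the three images and tag them from TAG/CUSTOMTAG instead of trusting a user-supplied VERSION. A minimal local run, assuming Docker, docker-compose, and the awscli are installed (the exact tag value comes from the Makefile and is not shown in this diff), might look like:

    # build satellite, storagenode, and gateway images, then bring the stack up
    make all-in-one

    # build the same images and run the scripted end-to-end check
    make test-all-in-one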


@@ -7,6 +7,8 @@ fi
RUN_PARAMS="${RUN_PARAMS:-} --config-dir ${CONF_PATH}"
export STORJ_IDENTITY_SERVER_ADDRESS="${STORJ_IDENTITY_SERVER_ADDRESS:-$(hostname -i):7777}"
if [ -n "${SATELLITE_ADDR:-}" ]; then
RUN_PARAMS="${RUN_PARAMS} --kademlia.bootstrap-addr $SATELLITE_ADDR"
fi
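
The entrypoint now defaults the identity server address to the container's own IP on port 7777; the compose file below overrides it per service (satellite:7777, storagenode:7777). A rough illustration of the default expansion inside a container:

    # hedged sketch: with the variable unset, the default resolves to <container IP>:7777
    unset STORJ_IDENTITY_SERVER_ADDRESS
    export STORJ_IDENTITY_SERVER_ADDRESS="${STORJ_IDENTITY_SERVER_ADDRESS:-$(hostname -i):7777}"
    echo "$STORJ_IDENTITY_SERVER_ADDRESS"   # e.g. 172.18.0.4:7777 (example value)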


@@ -16,15 +16,18 @@ services:
- POSTGRES_PASSWORD=storj-pass
satellite:
image: storjlabs/satellite:${VERSION}
image: storjlabs/satellite:${VERSION:-latest}
environment:
- API_KEY=abc123
- BOOTSTRAP_ADDR=localhost:8080
- STORJ_LOG_LEVEL=debug
- STORJ_CHECKER_QUEUE_ADDRESS=redis://redis:6379/?db=0
- STORJ_REPAIRER_QUEUE_ADDRESS=redis://redis:6379/?db=0
- STORJ_DATABASE=postgres://postgres:postgres@postgres/satellite?sslmode=disable
- STORJ_LOG_LEVEL=debug
- STORJ_REPAIRER_QUEUE_ADDRESS=redis://redis:6379/?db=0
- STORJ_IDENTITY_SERVER_ADDRESS=satellite:7777
restart: always
ports:
- 7778:7777
links:
- redis
- postgres
@@ -32,21 +35,24 @@ services:
- redis
- postgres
storagenode:
image: storjlabs/storagenode:${VERSION}
image: storjlabs/storagenode:${VERSION:-latest}
environment:
- SATELLITE_ADDR=satellite:7777
- STORJ_KADEMLIA_EXTERNAL_ADDRESS=storagenode:7777
- STORJ_KADEMLIA_OPERATOR_EMAIL=hello@storj.io
- STORJ_KADEMLIA_OPERATOR_WALLET=0x0000000000000000000000000000000000000000
- STORJ_LOG_LEVEL=debug
- STORJ_IDENTITY_SERVER_ADDRESS=storagenode:7777
restart: always
links:
- satellite
gateway:
image: storjlabs/gateway:${VERSION}
command: --rs.min-threshold 1 --rs.max-threshold 2 --rs.repair-threshold 1 --rs.success-threshold 1
image: storjlabs/gateway:${VERSION:-latest}
command: --rs.min-threshold 1 --rs.max-threshold 1 --rs.repair-threshold 1 --rs.success-threshold 1
environment:
- API_KEY=abc123
- SATELLITE_ADDR=satellite:7777
- STORJ_LOG_LEVEL=debug
- STORJ_SERVER_ADDRESS=0.0.0.0:7777
ports:
- 7777:7777
restart: always
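
With the ${VERSION:-latest} defaults, the stack comes up even when VERSION is unset, and the satellite's port 7777 is additionally published on the host as 7778. A quick way to sanity-check which tags compose will use (a sketch, assuming docker-compose is on PATH):

    # falls back to storjlabs/*:latest when VERSION is not exported
    docker-compose config | grep image:

    # pin the locally built images instead (the tag name here is hypothetical)
    export VERSION=mybuild
    docker-compose up -d satellite storagenode gateway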


@@ -118,8 +118,12 @@ func (ec *ecClient) Put(ctx context.Context, nodes []*pb.Node, rs eestream.Redun
// io.ErrUnexpectedEOF means the piece upload was interrupted due to slow connection.
// No error logging for this case.
if err != nil && err != io.ErrUnexpectedEOF {
zap.S().Errorf("Failed putting piece %s -> %s to node %s: %v",
pieceID, derivedPieceID, n.Id, err)
nodeAddress := "nil"
if n.Address != nil {
nodeAddress = n.Address.Address
}
zap.S().Errorf("Failed putting piece %s -> %s to node %s (%+v): %v",
pieceID, derivedPieceID, n.Id, nodeAddress, err)
}
infos <- info{i: i, err: err}
}(i, n)

scripts/test-aio.sh (new executable file)

@@ -0,0 +1,112 @@
#!/bin/bash
set -euo pipefail
TMP_DIR=$(mktemp -d -t tmp.XXXXXXXXXX)
CMP_DIR=$(mktemp -d -t tmp.XXXXXXXXXX)
# Clean up what we might have done
cleanup(){
echo ""
echo ""
echo ""
echo "=> Testing finished, logs to follow"
echo "=> Satellite logs"
docker logs storj_satellite_1
echo "=> Storagenode logs"
docker logs storj_storagenode_1
echo "=> Gateway logs"
docker logs storj_gateway_1
echo "=> Cleaning up"
rm -rf "$TMP_DIR" "$CMP_DIR"
# Hide any ERRORs and Faileds here as they are not relevant to the actual
# errors and failures of this test.
docker-compose down --rmi all 2>&1 | grep -v ERROR | grep -v Failed
}
trap cleanup EXIT
mkdir -p "$TMP_DIR"
mkdir -p "$CMP_DIR"
# Stand up production images in a local environment
docker-compose up -d satellite storagenode gateway
# Wait for the gateway to be ready
until docker logs storj_gateway_1 | grep -q Access; do
sleep 2
done
# Extract the keys for AWS client
access_key_id="$(docker logs storj_gateway_1 2>/dev/null | awk '/Access/{print $3; exit}')"
secret_access_key="$(docker logs storj_gateway_1 2>/dev/null | awk '/Secret/{print $3; exit}')"
echo "=> Access Key: $access_key_id"
echo "=> Secret Key: $secret_access_key"
export AWS_ACCESS_KEY_ID="$access_key_id"
export AWS_SECRET_ACCESS_KEY="$secret_access_key"
aws configure set default.region us-east-1
echo "=> Making test files"
head -c 1024 </dev/urandom > "$TMP_DIR/small-upload-testfile" # create 1KB file of random bytes (inline)
head -c 5120 </dev/urandom > "$TMP_DIR/big-upload-testfile" # create 5KB file of random bytes (remote)
head -c 5 </dev/urandom > "$TMP_DIR/multipart-upload-testfile" # create 5-byte file of random bytes (remote)
echo "=> Making bucket"
aws s3 --endpoint=http://localhost:7777/ mb s3://bucket
echo "=> Uploading test files"
aws configure set default.s3.multipart_threshold 1TB
aws s3 --endpoint=http://localhost:7777/ cp "$TMP_DIR/small-upload-testfile" s3://bucket/small-testfile
starttime="$(date +%s)"
while true; do
if aws s3 --endpoint=http://localhost:7777/ cp "$TMP_DIR/big-upload-testfile" s3://bucket/big-testfile; then
break
fi
echo "=> Large file failed, sleeping for a bit before trying again"
sleep 1
if [ $(( $starttime + 60 )) -lt $(date +%s) ]; then
echo "=> Failed to upload big-testfile for over a minute!"
exit 1
fi
done
# Wait 5 seconds to trigger any error related to one of the different intervals
sleep 5
aws configure set default.s3.multipart_threshold 4KB
aws s3 --endpoint=http://localhost:7777/ cp "$TMP_DIR/multipart-upload-testfile" s3://bucket/multipart-testfile
echo "=> Listing bucket"
aws s3 --endpoint=http://localhost:7777/ ls s3://bucket
echo "=> Downloading test files"
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/small-testfile "$CMP_DIR/small-download-testfile"
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/big-testfile "$CMP_DIR/big-download-testfile"
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/multipart-testfile "$CMP_DIR/multipart-download-testfile"
echo "=> Removing bucket"
aws s3 --endpoint=http://localhost:7777/ rb s3://bucket --force
echo "=> Comparing test files downloaded with uploaded versions"
if cmp "$TMP_DIR/small-upload-testfile" "$CMP_DIR/small-download-testfile"
then
echo "Downloaded file matches uploaded file"
else
echo "Downloaded file does not match uploaded file"
exit 1
fi
if cmp "$TMP_DIR/big-upload-testfile" "$CMP_DIR/big-download-testfile"
then
echo "Downloaded file matches uploaded file"
else
echo "Downloaded file does not match uploaded file"
exit 1
fi
if cmp "$TMP_DIR/multipart-upload-testfile" "$CMP_DIR/multipart-download-testfile"
then
echo "Downloaded file matches uploaded file"
else
echo "Downloaded file does not match uploaded file"
exit 1
fi
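
The script assumes the default compose project name storj (hence container names such as storj_gateway_1) and an installed awscli, matching the Travis job above. For manual debugging against a running stack, the same credentials it scrapes from the gateway logs can be reused directly, for example:

    # placeholder values; the real ones come from `docker logs storj_gateway_1`
    export AWS_ACCESS_KEY_ID=<Access Key from the gateway log>
    export AWS_SECRET_ACCESS_KEY=<Secret Key from the gateway log>
    aws s3 --endpoint=http://localhost:7777/ ls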