build: cleanup more gateway targets from Makefile (#3802)

Change-Id: Ia95caa2187b3e9e056a83cbea4230788ed4e8abd

Co-authored-by: Michal Niewrzal <michal@storj.io>
This commit is contained in:
Kaloyan Raev 2020-03-16 16:07:52 +02:00 committed by GitHub
parent 44ade00e8a
commit 4f0bf3fe1d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 3 additions and 146 deletions

View File

@@ -117,18 +117,6 @@ check-satellite-config-lock: ## Test if the satellite config file has changed (j
@echo "Running ${@}"
@cd scripts; ./check-satellite-config-lock.sh
.PHONY: all-in-one
all-in-one: ## Deploy docker images with one storagenode locally
export VERSION="${TAG}${CUSTOMTAG}" \
&& $(MAKE) satellite-image storagenode-image gateway-image \
&& docker-compose up --scale storagenode=1 satellite gateway
.PHONY: test-all-in-one
test-all-in-one: ## Test docker images locally
export VERSION="${TAG}${CUSTOMTAG}" \
&& $(MAKE) satellite-image storagenode-image gateway-image \
&& ./scripts/test-aio.sh
.PHONY: test-sim-backwards-compatible
test-sim-backwards-compatible: ## Test uploading a file with lastest release (jenkins)
@echo "Running ${@}"
@@ -162,19 +150,9 @@ storagenode-console:
gofmt -w -s storagenode/console/consoleassets/bindata.resource.go
.PHONY: images
images: gateway-image satellite-image storagenode-image uplink-image versioncontrol-image ## Build gateway, satellite, storagenode, uplink, and versioncontrol Docker images
images: satellite-image storagenode-image uplink-image versioncontrol-image ## Build satellite, storagenode, uplink, and versioncontrol Docker images
echo Built version: ${TAG}
.PHONY: gateway-image
gateway-image: gateway_linux_arm gateway_linux_arm64 gateway_linux_amd64 ## Build gateway Docker image
${DOCKER_BUILD} --pull=true -t storjlabs/gateway:${TAG}${CUSTOMTAG}-amd64 \
-f cmd/gateway/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/gateway:${TAG}${CUSTOMTAG}-arm32v6 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm32v6 \
-f cmd/gateway/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/gateway:${TAG}${CUSTOMTAG}-aarch64 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=aarch64 \
-f cmd/gateway/Dockerfile .
.PHONY: satellite-image
satellite-image: satellite_linux_arm satellite_linux_arm64 satellite_linux_amd64 ## Build satellite Docker image
${DOCKER_BUILD} --pull=true -t storjlabs/satellite:${TAG}${CUSTOMTAG}-amd64 \
@@ -260,9 +238,6 @@ binary-check:
.PHONY: certificates_%
certificates_%:
$(MAKE) binary-check COMPONENT=certificates GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
.PHONY: gateway_%
gateway_%:
$(MAKE) binary-check COMPONENT=gateway GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
.PHONY: identity_%
identity_%:
$(MAKE) binary-check COMPONENT=identity GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
@@ -289,11 +264,11 @@ versioncontrol_%:
$(MAKE) binary-check COMPONENT=versioncontrol GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
COMPONENTLIST := certificates gateway identity inspector linksharing satellite storagenode storagenode-updater uplink versioncontrol
COMPONENTLIST := certificates identity inspector linksharing satellite storagenode storagenode-updater uplink versioncontrol
OSARCHLIST := darwin_amd64 linux_amd64 linux_arm linux_arm64 windows_amd64 freebsd_amd64
BINARIES := $(foreach C,$(COMPONENTLIST),$(foreach O,$(OSARCHLIST),$C_$O))
.PHONY: binaries
binaries: ${BINARIES} ## Build certificates, gateway, identity, inspector, linksharing, satellite, storagenode, uplink, and versioncontrol binaries (jenkins)
binaries: ${BINARIES} ## Build certificates, identity, inspector, linksharing, satellite, storagenode, uplink, and versioncontrol binaries (jenkins)
.PHONY: sign-windows-installer
sign-windows-installer:
@@ -357,7 +332,6 @@ binaries-clean: ## Remove all local release binaries (jenkins)
.PHONY: clean-images
clean-images:
-docker rmi storjlabs/gateway:${TAG}${CUSTOMTAG}
-docker rmi storjlabs/satellite:${TAG}${CUSTOMTAG}
-docker rmi storjlabs/storagenode:${TAG}${CUSTOMTAG}
-docker rmi storjlabs/uplink:${TAG}${CUSTOMTAG}

View File

@ -1,117 +0,0 @@
#!/usr/bin/env bash
# Integration test for the docker-compose "all-in-one" deployment: brings up
# satellite, storagenode, and gateway containers, then drives the
# S3-compatible gateway with the AWS CLI (make bucket, upload, list,
# download, remove bucket) and verifies every downloaded file matches the
# uploaded original. -e/-u/pipefail: any failing command aborts the test.
set -euo pipefail
# Scratch dirs: TMP_DIR holds the generated upload files, CMP_DIR the
# downloaded copies used for comparison.
TMP_DIR=$(mktemp -d -t tmp.XXXXXXXXXX)
CMP_DIR=$(mktemp -d -t tmp.XXXXXXXXXX)
# Clean up what we might have done
# Runs on EVERY exit (success or failure, via the trap below): dumps the
# container logs to aid debugging, removes the scratch dirs, then tears
# down the compose stack together with its images.
cleanup(){
echo ""
echo ""
echo ""
echo "=> Testing finished, logs to follow"
echo "=> Satellite logs"
docker logs storj_satellite_1
echo "=> Storagenode logs"
docker logs storj_storagenode_1
echo "=> Gateway logs"
docker logs storj_gateway_1
echo "=> Cleaning up"
rm -rf "$TMP_DIR" "$CMP_DIR"
# Hide any ERRORs and Faileds here as they are not relevant to the actual
# errors and failures of this test.
docker-compose down --rmi all 2>&1 | grep -v ERROR | grep -v Failed
}
trap cleanup EXIT
mkdir -p "$TMP_DIR"
mkdir -p "$CMP_DIR"
# Stand up production images in a local environment
docker-compose up -d satellite storagenode gateway
# The gateway logs a line containing "Access" once its credentials are
# available; poll until that line appears.
echo "=> Waiting for the gateway to be ready"
until docker logs storj_gateway_1 | grep -q Access; do
sleep 2
done
# Extract the keys for AWS client
# (third whitespace-separated field of the first Access/Secret log line)
access_key_id="$(docker logs storj_gateway_1 2>/dev/null| awk '/Access/{print $3; exit}')"
secret_access_key="$(docker logs storj_gateway_1 2>/dev/null| awk '/Secret/{print $3; exit}')"
echo "=> Access Key: $access_key_id"
echo "=> Secret Key: $secret_access_key"
export AWS_ACCESS_KEY_ID="$access_key_id"
export AWS_SECRET_ACCESS_KEY="$secret_access_key"
aws configure set default.region us-east-1
echo "=> Making test files"
# random_bytes_file SIZE OUTPUT — write SIZE bytes of /dev/urandom to
# OUTPUT in a single dd block (SIZE uses dd multiplier syntax, e.g.
# 5x1024 = 5 KiB).
random_bytes_file(){
size=$1
output=$2
dd if=/dev/urandom of="$output" count=1 bs="$size" >/dev/null 2>&1
}
random_bytes_file 1x1024x1024 "$TMP_DIR/small-upload-testfile" # create 1mb file of random bytes (inline)
random_bytes_file 5x1024x1024 "$TMP_DIR/big-upload-testfile" # create 5mb file of random bytes (remote)
random_bytes_file 5x1024 "$TMP_DIR/multipart-upload-testfile" # create 5kb file of random bytes (remote)
echo "=> Making bucket"
aws s3 --endpoint=http://localhost:7777/ mb s3://bucket
echo "=> Uploading test files"
# Raise the multipart threshold so the small (1 MB) and big (5 MB) files
# are uploaded as single-part puts, not multipart.
aws configure set default.s3.multipart_threshold 1TB
aws s3 --endpoint=http://localhost:7777/ cp "$TMP_DIR/small-upload-testfile" s3://bucket/small-testfile
# The big upload may fail transiently while the stack warms up; retry
# once per second for up to 60 seconds before declaring failure.
starttime="$(date +%s)"
while true; do
if aws s3 --endpoint=http://localhost:7777/ cp "$TMP_DIR/big-upload-testfile" s3://bucket/big-testfile; then
break
fi
echo "=> Large file failed, sleeping for a bit before trying again"
sleep 1
if [ $(( $starttime + 60 )) -lt $(date +%s) ]; then
echo "=> Failed to upload big-testfile for over a minute!"
exit 1
fi
done
# Wait 5 seconds to trigger any error related to one of the different intervals
sleep 5
# Drop the threshold below the 5 KiB test file so this upload exercises
# the multipart code path.
aws configure set default.s3.multipart_threshold 4KB
aws s3 --endpoint=http://localhost:7777/ cp "$TMP_DIR/multipart-upload-testfile" s3://bucket/multipart-testfile
echo "=> Listing bucket"
aws s3 --endpoint=http://localhost:7777/ ls s3://bucket
echo "=> Downloading test files"
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/small-testfile "$CMP_DIR/small-download-testfile"
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/big-testfile "$CMP_DIR/big-download-testfile"
aws s3 --endpoint=http://localhost:7777/ cp s3://bucket/multipart-testfile "$CMP_DIR/multipart-download-testfile"
echo "=> Removing bucket"
aws s3 --endpoint=http://localhost:7777/ rb s3://bucket --force
echo "=> Comparing test files downloaded with uploaded versions"
# cmp exits non-zero on any byte difference, failing the round-trip check.
if cmp "$TMP_DIR/small-upload-testfile" "$CMP_DIR/small-download-testfile"
then
echo "Downloaded file matches uploaded file"
else
echo "Downloaded file does not match uploaded file"
exit 1
fi
if cmp "$TMP_DIR/big-upload-testfile" "$CMP_DIR/big-download-testfile"
then
echo "Downloaded file matches uploaded file"
else
echo "Downloaded file does not match uploaded file"
exit 1
fi
if cmp "$TMP_DIR/multipart-upload-testfile" "$CMP_DIR/multipart-download-testfile"
then
echo "Downloaded file matches uploaded file"
else
echo "Downloaded file does not match uploaded file"
exit 1
fi