all: switch from master to main

Stefan Benten 2020-12-28 22:59:06 +01:00
parent 2223877439
commit 7f1871b8f1
15 changed files with 56 additions and 56 deletions

View File

@@ -13,8 +13,8 @@ Please describe the performance impact:
- [ ] NEW: Are there any Satellite database migrations? Are they forwards _and_ backwards compatible?
- [ ] Does the PR describe what changes are being made?
- [ ] Does the PR describe why the changes are being made?
- [ ] Does the code follow [our style guide](https://github.com/storj/docs/blob/master/code/Style.md)?
- [ ] Does the code follow [our testing guide](https://github.com/storj/docs/blob/master/code/Testing.md)?
- [ ] Does the code follow [our style guide](https://github.com/storj/docs/blob/main/code/Style.md)?
- [ ] Does the code follow [our testing guide](https://github.com/storj/docs/blob/main/code/Testing.md)?
- [ ] Is the PR appropriately sized? (If it could be broken into smaller PRs it should be)
- [ ] Does the new code have enough tests? (*every* PR should have tests or justification otherwise. Bug-fix PRs especially)
- [ ] Does the new code have enough documentation that answers "how do I use it?" and "what does it do?"? (both source documentation and [higher level](https://github.com/storj/docs), diagrams?)

Jenkinsfile (vendored): 10 lines changed
View File

@@ -10,7 +10,7 @@ node('node') {
echo "Current build result: ${currentBuild.result}"
}
if (env.BRANCH_NAME == "master") {
if (env.BRANCH_NAME == "main") {
stage('Run Versions Test') {
lastStage = env.STAGE_NAME
try {
@@ -30,8 +30,8 @@ node('node') {
done
'''
sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
// fetch the remote master branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master'
// fetch the remote main branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6'
}
catch(err){
@@ -65,8 +65,8 @@ node('node') {
done
'''
sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
// fetch the remote master branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master'
// fetch the remote main branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6'
}
catch(err){
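A note on the fetch step above: a CI build of a pull request typically checks out only the commit under test, so `origin/main` does not exist in the workspace until it is fetched explicitly. A minimal standalone sketch of what that refspec does (outside the pipeline, plain git):

```bash
# Make origin/main available in a workspace that only has the commit under test.
# The refspec maps the remote branch refs/heads/main to the local remote-tracking
# ref refs/remotes/origin/main; the leading "+" permits a forced update of that ref.
git fetch --no-tags --progress -- https://github.com/storj/storj.git \
    +refs/heads/main:refs/remotes/origin/main

# Afterwards "origin/main" resolves, e.g. for diffs or for adding a worktree:
git rev-parse origin/main
```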

View File

@@ -68,7 +68,7 @@ pipeline {
'cockroach://root@localhost:26259/testcockroach?sslmode=disable'
STORJ_TEST_COCKROACH_ALT = 'cockroach://root@localhost:26260/testcockroach?sslmode=disable'
STORJ_TEST_POSTGRES = 'postgres://postgres@localhost/teststorj?sslmode=disable'
COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/versioncontrol/...'}"
COVERFLAGS = "${ env.BRANCH_NAME != 'main' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/versioncontrol/...'}"
}
steps {
sh 'cockroach sql --insecure --host=localhost:26256 -e \'create database testcockroach;\''
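The `COVERFLAGS` guard above only produces a coverage profile for builds of `main`. For reference, a shell version of the same conditional looks roughly like this (illustrative only; the coverpkg list is abbreviated and the real pipeline keeps this logic in Groovy):

```bash
# Only emit a coverage profile when building the main branch (abbreviated sketch).
if [ "${BRANCH_NAME:-}" = "main" ]; then
  COVERFLAGS='-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/...'
else
  COVERFLAGS=''
fi
go test $COVERFLAGS ./...
```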

View File

@@ -4,7 +4,7 @@ GOARCH ?= amd64
GOPATH ?= $(shell go env GOPATH)
COMPOSE_PROJECT_NAME := ${TAG}-$(shell git rev-parse --abbrev-ref HEAD)
BRANCH_NAME ?= $(shell git rev-parse --abbrev-ref HEAD | sed "s!/!-!g")
ifeq (${BRANCH_NAME},master)
ifeq (${BRANCH_NAME},main)
TAG := $(shell git rev-parse --short HEAD)-go${GO_VERSION}
TRACKED_BRANCH := true
LATEST_TAG := latest
@@ -368,5 +368,5 @@ diagrams-graphml:
.PHONY: bump-dependencies
bump-dependencies:
go get storj.io/common@master storj.io/private@master storj.io/uplink@master
go get storj.io/common@main storj.io/private@main storj.io/uplink@main
go mod tidy
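With `bump-dependencies` now tracking `main` for the companion modules, one way to confirm what the bump resolved to is to list the module versions afterwards (module paths as in the Makefile; the check itself is just standard Go tooling, not part of this change):

```bash
# Bump the companion modules to their latest main commits, then show what resolved.
make bump-dependencies
go list -m storj.io/common storj.io/private storj.io/uplink
```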

View File

@@ -2,10 +2,10 @@
[![Go Report Card](https://goreportcard.com/badge/storj.io/storj)](https://goreportcard.com/report/storj.io/storj)
[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://pkg.go.dev/storj.io/storj)
[![Coverage Status](https://img.shields.io/badge/coverage-master-green.svg)](https://build.dev.storj.io/job/storj/job/master/cobertura)
[![Coverage Status](https://img.shields.io/badge/coverage-master-green.svg)](https://build.dev.storj.io/job/storj/job/main/cobertura)
![Alpha](https://img.shields.io/badge/version-alpha-green.svg)
<img src="https://github.com/storj/storj/raw/master/resources/logo.png" width="100">
<img src="https://github.com/storj/storj/raw/main/resources/logo.png" width="100">
Storj is building a decentralized cloud storage network.
[Check out our white paper for more info!](https://storj.io/white-paper)
@@ -92,7 +92,7 @@ Use Git to push your changes to your fork:
```bash
git commit -a -m 'my changes!'
git push origin master
git push origin main
```
Use Github to open a pull request!
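Contributors who already have a local clone also need to follow the rename locally. This is standard git rather than anything taken from the repository docs, but the usual sequence is:

```bash
# Rename the local branch, re-point its upstream, and refresh origin's default HEAD.
git branch -m master main
git fetch origin
git branch -u origin/main main
git remote set-head origin -a
```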

View File

@@ -90,7 +90,7 @@ func networkExec(flags *Flags, args []string, command string) error {
if command == "setup" {
if flags.Postgres == "" {
return errors.New("postgres connection URL is required for running storj-sim. Example: `storj-sim network setup --postgres=<connection URL>`.\nSee docs for more details https://github.com/storj/docs/blob/master/Test-network.md#running-tests-with-postgres")
return errors.New("postgres connection URL is required for running storj-sim. Example: `storj-sim network setup --postgres=<connection URL>`.\nSee docs for more details https://github.com/storj/docs/blob/main/Test-network.md#running-tests-with-postgres")
}
identities, err := identitySetup(processes)
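The error message above already shows the expected invocation; a concrete example, with an illustrative connection URL matching the `teststorj` database the CI scripts in this same commit create, would be:

```bash
# Connection URL is illustrative; any reachable Postgres database works.
storj-sim network setup --postgres="postgres://postgres@localhost/teststorj?sslmode=disable"
# The test network is then normally started with:
storj-sim network run
```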

View File

@@ -47,7 +47,7 @@ The current Satellite database has the table `nodes`. For the offline time calcu
### Detecting offline nodes
Per [Kademlia removal blueprint](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md#network-refreshing), any storage node has to ping the satellite every hour. For storage nodes that have not pinged, we need to contact them directly.
Per [Kademlia removal blueprint](https://github.com/storj/storj/blob/main/docs/design/kademlia-removal.md#network-refreshing), any storage node has to ping the satellite every hour. For storage nodes that have not pinged, we need to contact them directly.
For finding the storage nodes gone offline, we run a chore, with the following query:
@@ -229,4 +229,4 @@ Data Science could use this approach to more nicely calculate statistics however
* The design needs to account for potential satellite or DNS outages to ensure that we do not unfairly disqualify nodes if the satellite cannot be contacted.
* The design indefinitely checks offline storage nodes until they are disqualified.
* The implementation requires coordination with the team working in [Kademlia removal blueprint](kademlia-removal.md) for the "ping" functionality.
* The implementation requires the [Kademlia removal network refreshing](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md#network-refreshing) implemented and deployed before deploying the new chore. Use a feature flag for removing the constraint.
* The implementation requires the [Kademlia removal network refreshing](https://github.com/storj/storj/blob/main/docs/design/kademlia-removal.md#network-refreshing) implemented and deployed before deploying the new chore. Use a feature flag for removing the constraint.

View File

@@ -29,7 +29,7 @@ Reference: [Redash query](https://redash.datasci.storj.io/queries/1224) for curr
## Design
#### GC Manager and Workers
The idea here is to split the garbage collection process into a manager process and many worker processes. The GC Manager will join the metainfo loop to retrieve pointer data. It will assign a portion of the storage nodes to each of the GC workers so that each worker is only responsible for creating bloom filters for a subset of all storage nodes. The GC master will send the piece IDs from the metainfo loop to the correct worker responsible for that storage node. Once the GC cycle is complete, the workers send the completed bloom filters to the storage nodes.
The idea here is to split the garbage collection process into a manager process and many worker processes. The GC Manager will join the metainfo loop to retrieve pointer data. It will assign a portion of the storage nodes to each of the GC workers so that each worker is only responsible for creating bloom filters for a subset of all storage nodes. The GC main will send the piece IDs from the metainfo loop to the correct worker responsible for that storage node. Once the GC cycle is complete, the workers send the completed bloom filters to the storage nodes.
#### Reliable data transfer mechanism
With this design, the GC Manager will be sending piece ID data to the workers, it's very important we never lose a piece ID. We need a way to confirm each worker received every piece ID from the GC Manager for any given GC iteration. Otherwise it could cause the storage node to delete the unintended data. One way to solve this is to assign a sequence number to each piece ID sent from GC Manager to the worker so that at the end of the GC cycle, the manager and worker must confirm the end sequence number match and all sequences in between have been received.
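Neither the node-to-worker assignment nor the sequence-number check is spelled out in code here, so the following is only an illustration of the first idea: a deterministic mapping from node ID to worker index, so that each worker always owns the same subset of storage nodes.

```bash
# Illustration only (not from the design doc): hash a node ID onto one of N workers.
workers=4
worker_for_node() {
  local node_id=$1
  local hash
  hash=$(printf '%s' "$node_id" | cksum | cut -d ' ' -f 1)
  echo $(( hash % workers ))
}
worker_for_node 1a2b3c4d   # prints a stable worker index between 0 and 3
```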

View File

@@ -34,19 +34,19 @@ Metainfo is responsible for all things related to the metainfo stored for each f
4) metainfo service, which ensures proper access to metainfo database and simplifies modification for other services.
#### orders
Orders is responsible for creating/managing orders that the satellite issues for a file upload/download. Orders are used to keep track of how much bandwidth was used to upload/download a file. This data is used to pay storage nodes for bandwidth usage and to charge the uplinks. See this [doc on the lifecycle of data](https://github.com/storj/docs/blob/master/code/payments/Accounting.md#lifecycle-of-the-data) related to accounting which includes orders.
Orders is responsible for creating/managing orders that the satellite issues for a file upload/download. Orders are used to keep track of how much bandwidth was used to upload/download a file. This data is used to pay storage nodes for bandwidth usage and to charge the uplinks. See this [doc on the lifecycle of data](https://github.com/storj/docs/blob/main/code/payments/Accounting.md#lifecycle-of-the-data) related to accounting which includes orders.
#### audit
Audit performs audits of the storage nodes to make sure the data they store is still retrievable. The audit system is currently made up of an audit service that runs on an interval performing audits on a segment at a time. The result of the audits are reported to the overlay service to store in node table in satellite.DB. See [docs on audit](https://github.com/storj/docs/blob/master/code/audits/audit-service.md) for more details.
Audit performs audits of the storage nodes to make sure the data they store is still retrievable. The audit system is currently made up of an audit service that runs on an interval performing audits on a segment at a time. The result of the audits are reported to the overlay service to store in node table in satellite.DB. See [docs on audit](https://github.com/storj/docs/blob/main/code/audits/audit-service.md) for more details.
#### repair
Repair searches metainfo for injured segments and adds them to the repair queue. Repairer picks segments from queue and tries to fix them. When repair fails, then the segment is added to irreparable database. The repair system is currently made of 4 parts and 2 DBs (db tables). The 4 parts are 1) repair observer (contains ReliabilityCache) 2) irreparable loop 3) repairer 4) repair queue. The 2 DBs are 1) injuredsegment (repair queue) table in satellite.DB 2) and irreparabledb table in satellite.DB
#### garbage collection (GC)
GC iterates over the metainfo and creates a list for each storage node. It sends these list to storage nodes, who can delete all pieces missing from the list. See [GC design doc](https://github.com/storj/storj/blob/master/docs/design/garbage-collection.md) for more details.
GC iterates over the metainfo and creates a list for each storage node. It sends these list to storage nodes, who can delete all pieces missing from the list. See [GC design doc](https://github.com/storj/storj/blob/main/docs/design/garbage-collection.md) for more details.
#### accounting
Accounting calculates how much uplinks use storage and how much storage nodes store data. See [docs on accounting](https://github.com/storj/docs/blob/master/code/payments/Accounting.md) for more details.
Accounting calculates how much uplinks use storage and how much storage nodes store data. See [docs on accounting](https://github.com/storj/docs/blob/main/code/payments/Accounting.md) for more details.
#### console
Console provides the web UI for the Satellite where users can create new accounts/projects/apiKeys needed for uploading/downloading to the network.
@@ -64,7 +64,7 @@ Nodestats allows storage nodes to ask information about themselves from the sate
Inspectors allow private diagnostics on certain systems. The following inspectors currently exist: overlay inspector, health inspector, and irreparable inspector.
#### kademlia
Kademlia, discovery, bootstrap, and vouchers are being removed and not included in this doc. See [kademlia removal design doc](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md) for more details.
Kademlia, discovery, bootstrap, and vouchers are being removed and not included in this doc. See [kademlia removal design doc](https://github.com/storj/storj/blob/main/docs/design/kademlia-removal.md) for more details.
#### RPC endpoints
The Satellite has the following RPC endpoints:
@@ -80,8 +80,8 @@ The Satellite has the following HTTP endpoints:
All services (except version) make connections to the satellite.DB. Five services rely on metainfo service to access the metainfoDB, this includes inspectors, accounting, audit, repair, and garbage collection.
See these docs for details on current database design:
- https://github.com/storj/docs/blob/master/code/Database.md#database
- https://github.com/storj/docs/blob/master/code/persistentstorage/Databases.md
- https://github.com/storj/docs/blob/main/code/Database.md#database
- https://github.com/storj/docs/blob/main/code/persistentstorage/Databases.md
#### limitations
The current items that prevent Satellite horizontal scaling include:
@@ -108,7 +108,7 @@ The private api process handles all private RPC and HTTP requests, this includes
#### metainfo loop and the observer system
The metainfo loop process iterates over all the segments in metainfoDB repeatedly on an interval. With each loop, the process can also execute the code for the observer systems that take a segment as input and performs some action with it. The observer systems currently include: audit observer, gc observer, repair checker observer, and accounting tally.
The audit observer uses the segments from the metainfo loop to create segment reservoir samples for each storage node and saves those samples to a reservoir cache. Audit observer currently runs on a 30s interval for the release default setting. See [audit-v2 design](https://github.com/storj/storj/blob/master/docs/design/audit-v2.md) for more details.
The audit observer uses the segments from the metainfo loop to create segment reservoir samples for each storage node and saves those samples to a reservoir cache. Audit observer currently runs on a 30s interval for the release default setting. See [audit-v2 design](https://github.com/storj/storj/blob/main/docs/design/audit-v2.md) for more details.
The repair (checker) observer uses the segments from the metainfo loop to identify segments that need to be repaired and adds those injured segments to the repair queue. The repair check currently has a `checkerObserver.ReliabilityCache`, this should be ok to stay in-memory for the time being since we plan on only running a single metainfo loop. The repair observer currently runs on a 30s interval for the release default setting.
@@ -131,7 +131,7 @@ Lets get rid of the irreparable loop. For one, we never expect there to be files
The repair worker executes a repair for an item in the repair queue. We want to work through the repair queue as fast as possible so its important to be able to dispatch many workers at a time.
#### audit workers
The audit process should be able to run many audits in parallel. See the [audit-v2 design doc](https://github.com/storj/storj/blob/master/docs/design/audit-v2.md) for updates to the audit design.
The audit process should be able to run many audits in parallel. See the [audit-v2 design doc](https://github.com/storj/storj/blob/main/docs/design/audit-v2.md) for updates to the audit design.
#### accounting
The accounting process is responsible for calculating disk usage and bandwidth usage. These calculations are used for uplink invoices and storage nodes payments. Accounting should receive storage node total stored bytes data from the tally observer running with the metainfo loop.

View File

@@ -2,10 +2,10 @@
# This file contains the second part of Stage 2 for the rolling upgrade test.
# Description of file functionality:
# * Upload an inline, remote, and multisegment file to the network using the master uplink and the new satellite api.
# * Upload an inline, remote, and multisegment file to the network using the master uplink and the old satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and new satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and old satellite api.
# * Upload an inline, remote, and multisegment file to the network using the main uplink and the new satellite api.
# * Upload an inline, remote, and multisegment file to the network using the main uplink and the old satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and new satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and old satellite api.
set -ueo pipefail

View File

@@ -2,8 +2,8 @@
# This file contains the first part of Stage 2 for the rolling upgrade test.
# Description of file functionality:
# * Download the inline, remote, and multisegment file from the network using the master uplink and the new satellite api.
# * Download the inline, remote, and multisegment file from the network using the master uplink and the old satellite api.
# * Download the inline, remote, and multisegment file from the network using the main uplink and the new satellite api.
# * Download the inline, remote, and multisegment file from the network using the main uplink and the old satellite api.
set -ueo pipefail

View File

@@ -24,6 +24,6 @@ until $(docker logs postgres-$BUILD_NUMBER | grep "database system is ready to a
done
docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj
# fetch the remote master branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master
# fetch the remote main branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main
$SCRIPTDIR/test-sim-rolling-upgrade.sh
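Both Jenkins wrapper scripts use the same pattern: wait for the Postgres container to come up, create the `teststorj` database, fetch `main`, then hand off to the test script. An equivalent readiness check using `pg_isready` instead of grepping the container logs (a substitution for illustration, not what the script does) would be:

```bash
# Wait for the postgres-$BUILD_NUMBER container, then create the test database.
until docker exec "postgres-$BUILD_NUMBER" pg_isready -U postgres >/dev/null 2>&1; do
  sleep 1
done
docker exec "postgres-$BUILD_NUMBER" createdb -U postgres teststorj
```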

View File

@@ -9,15 +9,15 @@
# * (test-versions.sh upload) - Upload an inline, remote, and multisegment file to the network with the selected uplink.
# Stage 2:
# * Upgrade the satellite to current commit. Run an "old" satellite api server on the latest point release (port 30000).
# * Keep half of the storagenodes on the latest point release. Upgrade the other half to master.
# * Keep half of the storagenodes on the latest point release. Upgrade the other half to main.
# * Point half of the storagenodes to the old satellite api (port 30000). Keep the other half on the new satellite api (port 10000).
# * Check out the master version of the uplink.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the master uplink and the new satellite api.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the master uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the master uplink and the new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the master uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and old satellite api.
# * Check out the main version of the uplink.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the main uplink and the new satellite api.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the main uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the main uplink and the new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the main uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and old satellite api.
set -ueo pipefail
set +x
@@ -46,11 +46,11 @@ populate_sno_versions(){
# set peers' versions
# in stage 1: satellite, uplink, and storagenode use latest release version
# in stage 2: satellite core uses latest release version and satellite api uses master. Storage nodes are split into half on latest release version and half on master. Uplink uses the latest release version plus master
# in stage 2: satellite core uses latest release version and satellite api uses main. Storage nodes are split into half on latest release version and half on main. Uplink uses the latest release version plus main
BRANCH_NAME=${BRANCH_NAME:-""}
git fetch --tags
# if it's running on a release branch, we will set the stage 1 version to be the latest previous major release
# if it's running on master, we will set the stage 1 version to be the current release version
# if it's running on main, we will set the stage 1 version to be the current release version
current_commit=$(git rev-parse HEAD)
stage1_release_version=$(git tag -l --sort -version:refname | grep -v rc | head -1)
if [[ $BRANCH_NAME = v* ]]; then
@@ -277,7 +277,7 @@ old_api_pid=$!
# Wait until old satellite api is responding to requests to ensure it happens before migration.
storj-sim tool wait-for --retry 50 --interval 100ms "127.0.0.1:30000"
# Downloading every file uploaded in stage 1 from the network using the latest commit from master branch for each uplink version
# Downloading every file uploaded in stage 1 from the network using the latest commit from main branch for each uplink version
for ul_version in ${stage2_uplink_versions}; do
if [ "$ul_version" = "v1.6.3" ]; then
# TODO: skip v1.6.3 uplink since it doesn't support changing imported access satellite address
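The storagenode version lists for both stages are built with `populate_sno_versions`; its body is not part of this diff, but from how it is called in `test-sim-versions.sh` below (half the nodes on the latest release, half on `main`) it effectively repeats a version string N times. A hypothetical stand-in:

```bash
# Hypothetical stand-in for populate_sno_versions; the real body is not shown in this diff.
populate_sno_versions() {
  local version=$1 count=$2
  for _ in $(seq "$count"); do printf '%s ' "$version"; done
}
populate_sno_versions main 5   # -> "main main main main main "
```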

View File

@@ -24,6 +24,6 @@ until $(docker logs postgres-$BUILD_NUMBER | grep "database system is ready to a
done
docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj
# fetch the remote master branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master
# fetch the remote main branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main
$SCRIPTDIR/test-sim-versions.sh

View File

@@ -25,7 +25,7 @@ RUN_TYPE=${RUN_TYPE:-"jenkins"}
# set peers' versions
# in stage 1: satellite and storagenode use latest release version, uplink uses all highest point release from all major releases starting from v0.15
# in stage 2: satellite core uses latest release version and satellite api uses master. Storage nodes are split into half on latest release version and half on master. Uplink uses the all versions from stage 1 plus master
# in stage 2: satellite core uses latest release version and satellite api uses main. Storage nodes are split into half on latest release version and half on main. Uplink uses the all versions from stage 1 plus main
git fetch --tags
major_release_tags=$(
git tag -l --sort -version:refname | # get the tag list
@@ -38,9 +38,9 @@ current_release_version=$(echo $major_release_tags | xargs -n 1 | tail -1)
stage1_sat_version=$current_release_version
stage1_uplink_versions=$major_release_tags
stage1_storagenode_versions=$(populate_sno_versions $current_release_version 10)
stage2_sat_version="master"
stage2_uplink_versions=$major_release_tags\ "master"
stage2_storagenode_versions=$(populate_sno_versions $current_release_version 5)\ $(populate_sno_versions "master" 5)
stage2_sat_version="main"
stage2_uplink_versions=$major_release_tags\ "main"
stage2_storagenode_versions=$(populate_sno_versions $current_release_version 5)\ $(populate_sno_versions "main" 5)
echo "stage1_sat_version" $stage1_sat_version
echo "stage1_uplink_versions" $stage1_uplink_versions
@@ -179,9 +179,9 @@ for version in ${unique_versions}; do
bin_dir=${dir}/bin
echo -e "\nAdding worktree for ${version} in ${dir}."
if [[ $version = "master" ]]
if [[ $version = "main" ]]
then
git worktree add -f "$dir" "origin/master"
git worktree add -f "$dir" "origin/main"
else
git worktree add -f "$dir" "${version}"
fi
@@ -196,7 +196,7 @@ for version in ${unique_versions}; do
EOF
fi
if [[ $version = $current_release_version || $version = "master" ]]
if [[ $version = $current_release_version || $version = "main" ]]
then
echo "Installing storj-sim for ${version} in ${dir}."
@@ -238,7 +238,7 @@ test_dir=$(version_dir "test_dir")
cp -r $(version_dir ${stage1_sat_version}) ${test_dir}
echo -e "\nSetting up stage 1 in ${test_dir}"
setup_stage "${test_dir}" "${stage1_sat_version}" "${stage1_storagenode_versions}"
update_access_script_path="$(version_dir "master")/scripts/update-access.go"
update_access_script_path="$(version_dir "main")/scripts/update-access.go"
# Uploading files to the network using the latest release version for each uplink version
for ul_version in ${stage1_uplink_versions}; do
@@ -254,7 +254,7 @@ echo -e "\nSetting up stage 2 in ${test_dir}"
setup_stage "${test_dir}" "${stage2_sat_version}" "${stage2_storagenode_versions}"
echo -e "\nRunning stage 2."
# Downloading every file uploaded in stage 1 from the network using the latest commit from master branch for each uplink version
# Downloading every file uploaded in stage 1 from the network using the latest commit from main branch for each uplink version
for ul_version in ${stage2_uplink_versions}; do
echo "Stage 2 Uplink version: ${ul_version}"
src_ul_version_dir=$(version_dir ${ul_version})
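For context on the worktree step above: each version under test gets its own checkout, with `main` resolved against the `origin/main` remote-tracking ref fetched by the wrapper script. A minimal standalone sketch (scratch directory and version list are illustrative):

```bash
# One worktree per version under test; "main" comes from the fetched origin/main ref.
scratch=$(mktemp -d)
for version in v1.18.1 main; do            # version list is illustrative
  dir="$scratch/$version"
  if [[ $version = "main" ]]; then
    git worktree add -f "$dir" origin/main
  else
    git worktree add -f "$dir" "$version"
  fi
  # the real script then builds the storj-sim / storagenode binaries from $dir
done
```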