From e6c0383477d813be3c24b65c1b7ce0131e6f2246 Mon Sep 17 00:00:00 2001 From: Brandon Iglesias Date: Wed, 20 Jan 2021 10:52:29 -0500 Subject: [PATCH 01/38] adding hypernet to the list (#4025) --- satellite/rewards/partners.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/satellite/rewards/partners.go b/satellite/rewards/partners.go index 93908f480..7f5f9b46b 100644 --- a/satellite/rewards/partners.go +++ b/satellite/rewards/partners.go @@ -83,6 +83,10 @@ func DefaultPartners() PartnerList { Name: "Heroku", ID: "706011f3-400e-45eb-a796-90cce2a7d67e", UUID: parseUUID("706011f3-400e-45eb-a796-90cce2a7d67e"), + }, { + Name: "Hypernet", + ID: "5abfc372-1d59-44fa-bbcc-bc3aa03a9542", + UUID: parseUUID("5abfc372-1d59-44fa-bbcc-bc3aa03a9542"), }, { Name: "Infura", ID: "1519bdee-ed18-45fe-86c6-4c7fa9668a14", From 0a48071854cde1df08ece8f6dfa4e3d804c70e71 Mon Sep 17 00:00:00 2001 From: Moby von Briesen Date: Fri, 15 Jan 2021 17:21:05 -0500 Subject: [PATCH 02/38] satellite/console: Add pagination fields for ListProjectsByOwnerID Add ProjectsCursor type for pagination Add PageCount, CurrentPage, and TotalCount ProjectsPage This allows us to mimic the logic of GetBucketTotals and the implementation of BucketUsages in graphql for the new ProjectsByOwnerID functionality. Change-Id: I4e1613859085db65971b44fcacd9813d9ddad8eb --- satellite/console/projects.go | 16 ++++++++++++- satellite/console/projects_test.go | 24 +++++++++++++++----- satellite/satellitedb/projects.go | 36 +++++++++++++++++++++++++----- 3 files changed, 63 insertions(+), 13 deletions(-) diff --git a/satellite/console/projects.go b/satellite/console/projects.go index 52ca98a12..9d5878ff7 100644 --- a/satellite/console/projects.go +++ b/satellite/console/projects.go @@ -34,7 +34,7 @@ type Projects interface { // List returns paginated projects, created before provided timestamp. 
List(ctx context.Context, offset int64, limit int, before time.Time) (ProjectsPage, error) // ListByOwnerID is a method for querying all projects from the database by ownerID. It also includes the number of members for each project. - ListByOwnerID(ctx context.Context, userID uuid.UUID, limit int, offset int64) (ProjectsPage, error) + ListByOwnerID(ctx context.Context, userID uuid.UUID, cursor ProjectsCursor) (ProjectsPage, error) // UpdateRateLimit is a method for updating projects rate limit. UpdateRateLimit(ctx context.Context, id uuid.UUID, newLimit int) error @@ -67,6 +67,13 @@ type ProjectInfo struct { CreatedAt time.Time `json:"createdAt"` } +// ProjectsCursor holds info for project +// cursor pagination. +type ProjectsCursor struct { + Limit int + Page int +} + // ProjectsPage returns paginated projects, // providing next offset if there are more projects // to retrieve. @@ -74,6 +81,13 @@ type ProjectsPage struct { Projects []Project Next bool NextOffset int64 + + Limit int + Offset int64 + + PageCount int + CurrentPage int + TotalCount int64 } // ValidateNameAndDescription validates project name and description strings. 
diff --git a/satellite/console/projects_test.go b/satellite/console/projects_test.go index 75ca0cbe4..a6d2225b1 100644 --- a/satellite/console/projects_test.go +++ b/satellite/console/projects_test.go @@ -228,7 +228,7 @@ func TestProjectsList(t *testing.T) { } require.False(t, projsPage.Next) - require.Equal(t, int64(0), projsPage.NextOffset) + require.EqualValues(t, 0, projsPage.NextOffset) require.Equal(t, length, len(projectsList)) require.Empty(t, cmp.Diff(projects[0], projectsList[0], cmp.Transformer("Sort", func(xs []console.Project) []console.Project { @@ -243,8 +243,9 @@ func TestProjectsList(t *testing.T) { func TestProjectsListByOwner(t *testing.T) { const ( - limit = 5 - length = limit*4 - 1 // make length offset from page size so we can test incomplete page at end + limit = 5 + length = limit*4 - 1 // make length offset from page size so we can test incomplete page at end + totalPages = 4 ) rateLimit := 100 @@ -333,23 +334,34 @@ func TestProjectsListByOwner(t *testing.T) { {id: owner2.ID, originalProjects: owner2Projects}, } for _, tt := range testCases { - projsPage, err := projectsDB.ListByOwnerID(ctx, tt.id, limit, 0) + cursor := &console.ProjectsCursor{ + Limit: limit, + Page: 1, + } + projsPage, err := projectsDB.ListByOwnerID(ctx, tt.id, *cursor) require.NoError(t, err) require.Len(t, projsPage.Projects, limit) + require.EqualValues(t, 1, projsPage.CurrentPage) + require.EqualValues(t, totalPages, projsPage.PageCount) + require.EqualValues(t, length, projsPage.TotalCount) ownerProjectsDB := projsPage.Projects for projsPage.Next { - projsPage, err = projectsDB.ListByOwnerID(ctx, tt.id, limit, projsPage.NextOffset) + cursor.Page++ + projsPage, err = projectsDB.ListByOwnerID(ctx, tt.id, *cursor) require.NoError(t, err) // number of projects should not exceed page limit require.True(t, len(projsPage.Projects) > 0 && len(projsPage.Projects) <= limit) + require.EqualValues(t, cursor.Page, projsPage.CurrentPage) + require.EqualValues(t, totalPages, 
projsPage.PageCount) + require.EqualValues(t, length, projsPage.TotalCount) ownerProjectsDB = append(ownerProjectsDB, projsPage.Projects...) } require.False(t, projsPage.Next) - require.Equal(t, int64(0), projsPage.NextOffset) + require.EqualValues(t, 0, projsPage.NextOffset) require.Equal(t, length, len(ownerProjectsDB)) // sort originalProjects by Name in alphabetical order originalProjects := tt.originalProjects diff --git a/satellite/satellitedb/projects.go b/satellite/satellitedb/projects.go index 822424ea8..c2fa528e6 100644 --- a/satellite/satellitedb/projects.go +++ b/satellite/satellitedb/projects.go @@ -203,10 +203,34 @@ func (projects *projects) List(ctx context.Context, offset int64, limit int, bef } // ListByOwnerID is a method for querying all projects from the database by ownerID. It also includes the number of members for each project. -func (projects *projects) ListByOwnerID(ctx context.Context, ownerID uuid.UUID, limit int, offset int64) (_ console.ProjectsPage, err error) { +// cursor.Limit is set to 50 if it exceeds 50. +func (projects *projects) ListByOwnerID(ctx context.Context, ownerID uuid.UUID, cursor console.ProjectsCursor) (_ console.ProjectsPage, err error) { defer mon.Task()(&ctx)(&err) - var page console.ProjectsPage + if cursor.Limit > 50 { + cursor.Limit = 50 + } + if cursor.Page == 0 { + return console.ProjectsPage{}, errs.New("page can not be 0") + } + + page := console.ProjectsPage{ + CurrentPage: cursor.Page, + Limit: cursor.Limit, + Offset: int64((cursor.Page - 1) * cursor.Limit), + } + + countRow := projects.sdb.QueryRowContext(ctx, projects.sdb.Rebind(` + SELECT COUNT(*) FROM projects WHERE owner_id = ? 
+ `), ownerID) + err = countRow.Scan(&page.TotalCount) + if err != nil { + return console.ProjectsPage{}, err + } + page.PageCount = int(page.TotalCount / int64(cursor.Limit)) + if page.TotalCount%int64(cursor.Limit) != 0 { + page.PageCount++ + } rows, err := projects.sdb.Query(ctx, projects.sdb.Rebind(` SELECT id, name, description, owner_id, rate_limit, max_buckets, created_at, @@ -216,20 +240,20 @@ func (projects *projects) ListByOwnerID(ctx context.Context, ownerID uuid.UUID, ORDER BY name ASC OFFSET ? ROWS LIMIT ? - `), ownerID, offset, limit+1) // add 1 to limit to see if there is another page + `), ownerID, page.Offset, page.Limit+1) // add 1 to limit to see if there is another page if err != nil { return console.ProjectsPage{}, err } defer func() { err = errs.Combine(err, rows.Close()) }() count := 0 - projectsToSend := make([]console.Project, 0, limit) + projectsToSend := make([]console.Project, 0, page.Limit) for rows.Next() { count++ - if count == limit+1 { + if count == page.Limit+1 { // we are done with this page; do not include this project page.Next = true - page.NextOffset = offset + int64(limit) + page.NextOffset = page.Offset + int64(page.Limit) break } var rateLimit, maxBuckets sql.NullInt32 From ff102d6ffe77342a47683f11a6e5aeb5ac6f3085 Mon Sep 17 00:00:00 2001 From: Stefan Benten Date: Wed, 20 Jan 2021 19:08:35 +0100 Subject: [PATCH 03/38] Makefile: update to go v1.15.7 (#4026) --- Jenkinsfile | 4 ++-- Makefile | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 2ecad1dd6..f5cdd999a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -32,7 +32,7 @@ node('node') { sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj' // fetch the remote main branch sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main' - sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint 
$PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6' + sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.7' } catch(err){ throw err @@ -67,7 +67,7 @@ node('node') { sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj' // fetch the remote main branch sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main' - sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6' + sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.7' } catch(err){ throw err diff --git a/Makefile b/Makefile index 6236e90f6..d5ddd4102 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -GO_VERSION ?= 1.15.6 +GO_VERSION ?= 1.15.7 GOOS ?= linux GOARCH ?= amd64 GOPATH ?= $(shell go env GOPATH) From fa2dae0acaa3693cce9c983e0f4bffbc2e696749 Mon Sep 17 00:00:00 2001 From: Malcolm Bouzi Date: Thu, 21 Jan 2021 10:03:02 -0500 Subject: [PATCH 04/38] Pin Project Owner in Team tab to top of members list (#4021) * web/satellite: Pin Project Owner in Team tab to top of members list * web/satellite:Pin Project Owner to top of members list - input missing semicolon * web/satellite: Pin 
Project Owner to top of members list - refactor project owner variable instantion - check for existance of owner before returning members list * web/satellite: Pin project owner to top of members list - changed returned members list variable name - add comment explaining unshift logic --- .../src/components/team/ProjectMembersArea.vue | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/web/satellite/src/components/team/ProjectMembersArea.vue b/web/satellite/src/components/team/ProjectMembersArea.vue index 84163ca4c..5c205fed3 100644 --- a/web/satellite/src/components/team/ProjectMembersArea.vue +++ b/web/satellite/src/components/team/ProjectMembersArea.vue @@ -104,9 +104,17 @@ export default class ProjectMembersArea extends Vue { /** * Returns team members of current page from store. + * With project owner pinned to top */ public get projectMembers(): ProjectMember[] { - return this.$store.state.projectMembersModule.page.projectMembers; + const projectMembers = this.$store.state.projectMembersModule.page.projectMembers; + const projectOwner = projectMembers.find((member) => member.user.id === this.$store.getters.selectedProject.ownerId); + const projectMembersToReturn = projectMembers.filter((member) => member.user.id !== this.$store.getters.selectedProject.ownerId); + + // if the project owner exists, place at the front of the members list + projectOwner && projectMembersToReturn.unshift(projectOwner); + + return projectMembersToReturn; } public get getItemComponent() { From 6c3b7a233eaca1588961d0297233820d2c47013d Mon Sep 17 00:00:00 2001 From: VitaliiShpital Date: Thu, 21 Jan 2021 14:48:26 +0200 Subject: [PATCH 05/38] web/satellite: add beta messaging for EU and US2 sats WHAT: add a banner with a message saying: gateway MT is in beta for EU and US2 sats WHY: user experience? 
Change-Id: I0964c4499cdaf12dbd9dba082910fff4ff6f9a12 --- .../accessGrants/steps/ResultStep.vue | 49 ++++++++++++++++--- 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/web/satellite/src/components/accessGrants/steps/ResultStep.vue b/web/satellite/src/components/accessGrants/steps/ResultStep.vue index 0989767d8..171904ea4 100644 --- a/web/satellite/src/components/accessGrants/steps/ResultStep.vue +++ b/web/satellite/src/components/accessGrants/steps/ResultStep.vue @@ -29,11 +29,22 @@
-

Gateway Credentials (beta)

+

Gateway Credentials

+
+

Gateway MT is currently in Beta

+ + Learn More > + +

Using Gateway Credentials Enables Server-Side Encryption. @@ -325,8 +336,8 @@ export default class ResultStep extends Vue { } &__warning { - padding: 20px; - width: calc(100% - 42px); + padding: 15px; + width: calc(100% - 32px); background: #fff9f7; border: 1px solid #f84b00; border-radius: 8px; @@ -354,7 +365,7 @@ export default class ResultStep extends Vue { } &__grant-area { - margin: 40px 0; + margin: 20px; width: 100%; &__label { @@ -412,8 +423,34 @@ export default class ResultStep extends Vue { &__container { width: 100%; + &__beta { + background-color: #effff9; + border: 1px solid #1a9666; + border-radius: 6px; + display: flex; + align-items: center; + justify-content: space-between; + padding: 12px 20px; + margin-top: 20px; + + &__message { + font-weight: bold; + font-size: 14px; + line-height: 19px; + color: #000; + margin: 0; + } + + &__link { + font-weight: bold; + font-size: 14px; + line-height: 19px; + color: #1a9666; + } + } + &__warning { - margin-top: 30px; + margin-top: 20px; background: #f5f6fa; border-radius: 6px; padding: 40px 50px; @@ -477,7 +514,7 @@ export default class ResultStep extends Vue { } &__done-button { - margin-top: 30px; + margin-top: 20px; } } From 1f1d9fce5898bd37b6055d12c5967d1d301e1d9e Mon Sep 17 00:00:00 2001 From: Jessica Grebenschikov Date: Wed, 20 Jan 2021 10:57:47 -0800 Subject: [PATCH 06/38] satellite/console/wasm: add test to confirm wasm size isnt growing Change-Id: I975a9f8ac3f6b98cc213140fdd7a99557efe14c8 --- Jenkinsfile.public | 1 + Makefile | 5 +++++ scripts/test-wasm-size.sh | 19 +++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100755 scripts/test-wasm-size.sh diff --git a/Jenkinsfile.public b/Jenkinsfile.public index 4c24993e7..51e99de05 100644 --- a/Jenkinsfile.public +++ b/Jenkinsfile.public @@ -57,6 +57,7 @@ pipeline { sh 'golangci-lint --config /go/ci/.golangci.yml -j=2 run' sh 'check-mod-tidy -mod .build/go.mod.orig' sh 'make check-monitoring' + sh 'make test-wasm-size' } } diff --git 
a/Makefile b/Makefile index d5ddd4102..4709bcdaa 100644 --- a/Makefile +++ b/Makefile @@ -129,6 +129,11 @@ check-monitoring: ## Check for locked monkit calls that have changed || (echo "Locked monkit metrics have been changed. Notify #data-science and run \`go run github.com/storj/ci/check-monitoring -out monkit.lock ./...\` to update monkit.lock file." \ && exit 1) +.PHONY: test-wasm-size +test-wasm-size: ## Test that the built .wasm code has not increased in size + @echo "Running ${@}" + @./scripts/test-wasm-size.sh + ##@ Build .PHONY: storagenode-console diff --git a/scripts/test-wasm-size.sh b/scripts/test-wasm-size.sh new file mode 100755 index 000000000..a63f2dcd7 --- /dev/null +++ b/scripts/test-wasm-size.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -ueo pipefail +set +x + +cleanup(){ + rm main.wasm + echo "cleaned up test successfully" +} +trap cleanup EXIT + +cd satellite/console/wasm && pwd && GOOS=js GOARCH=wasm go build -o main.wasm . +BUILD_SIZE=$(stat -c %s main.wasm) +CURRENT_SIZE=10000000 +if [ $BUILD_SIZE -gt $CURRENT_SIZE ]; then + echo "Wasm size is too big, was $CURRENT_SIZE but now it is $BUILD_SIZE" + exit 1 +fi + +echo "Wasm size did not increase and it is $BUILD_SIZE (limit: $CURRENT_SIZE)" From b7d8dee5e9de4a0d16a62ac01140a1e9c47fb302 Mon Sep 17 00:00:00 2001 From: Jessica Grebenschikov Date: Fri, 8 Jan 2021 11:26:37 -0800 Subject: [PATCH 07/38] satellite/console/wasm: add js tests Change-Id: I8b1b0e81500836e0408e0517edb6c696698ab5f7 --- Jenkinsfile.public | 10 +++++ satellite/console/wasm/tests/main.spec.js | 54 +++++++++++++++++++++++ satellite/console/wasm/tests/package.json | 12 +++++ 3 files changed, 76 insertions(+) create mode 100644 satellite/console/wasm/tests/main.spec.js create mode 100644 satellite/console/wasm/tests/package.json diff --git a/Jenkinsfile.public b/Jenkinsfile.public index 51e99de05..941b5d345 100644 --- a/Jenkinsfile.public +++ b/Jenkinsfile.public @@ -190,6 +190,16 @@ pipeline { } } + stage('wasm npm') { + 
steps { + dir(".build") { + sh 'cp -r ../satellite/console/wasm/tests/ .' + sh 'cd tests && cp "$(go env GOROOT)/misc/wasm/wasm_exec.js" .' + sh 'cd tests && npm install && npm run test' + } + } + } + stage('storagenode npm') { steps { dir("web/storagenode") { diff --git a/satellite/console/wasm/tests/main.spec.js b/satellite/console/wasm/tests/main.spec.js new file mode 100644 index 000000000..049a863a7 --- /dev/null +++ b/satellite/console/wasm/tests/main.spec.js @@ -0,0 +1,54 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information + +require('./wasm_exec.js'); + +const fs = require('fs'); +const path = require('path'); + +describe('main.wasm Tests', () => { + beforeAll(async () => { + const go = new Go(); + wasmPath = __dirname; + if (process.env.WASM_PATH) { + wasmPath = process.env.WASM_PATH; + } + wasmPath = path.resolve(wasmPath, 'main.wasm'); + const buffer = fs.readFileSync(wasmPath); + await WebAssembly.instantiate(buffer, go.importObject).then(results => { + go.run(results.instance); + }) + }) + + describe('generateAccessGrant function', () => { + test('returns an error when called without arguments', async () => { + const result = generateAccessGrant(); + expect(result["error"]).toContain("not enough argument") + expect(result["value"]).toBeNull() + }); + test('happy path returns an access grant', async () => { + const apiKey = "13YqeGFpvtzbUp1QAfpvy2E5ZqLUFFNhEkv7153UDGDVnSmTuYYa7tKUnENGgvFXCCSFP7zNhKw6fsuQmWG5JGdQJbXVaVYFhoM2LcA" + const projectID = "b9e663e0-69e0-48e9-8eb2-4079be85e488" + const result = generateAccessGrant("a",apiKey, "supersecretpassphrase", projectID); + expect(result["error"]).toEqual("") + expect(result["value"]).toEqual("158UWUf6FHtCk8RGQn2JAXETNRnVwyoF7yEQQnuvPrLbsCPpttuAVWwzQ2YgD2bpQLpdBnctHssvQsqeju7kn7gz3LEJZSdRqyRG6rA9Da3PLGsawWMtM3NdGVqq9akyEmotsN7eMJVC1mfTsupiYXeHioTTTg11kY") + }); + }); + + describe('setAPIKeyPermission function', () => { + test('returns an error when called without arguments', 
async () => { + const result = setAPIKeyPermission(); + expect(result["error"]).toContain("not enough arguments") + expect(result["value"]).toBeNull() + }); + test('default permissions returns an access grant', async () => { + const apiKey = "13YqeGFpvtzbUp1QAfpvy2E5ZqLUFFNhEkv7153UDGDVnSmTuYYa7tKUnENGgvFXCCSFP7zNhKw6fsuQmWG5JGdQJbXVaVYFhoM2LcA" + const projectID = "b9e663e0-69e0-48e9-8eb2-4079be85e488" + const perm = newPermission() + perm["AllowDownload"] = true + const result = setAPIKeyPermission(apiKey, [], perm); + expect(result["error"]).toEqual("") + expect(result["value"]).toEqual("19JjrwZJK1Ck5PdhRtxujGUnzbbiPYSSPZGyE8xrTbxVaJSEr9JL4vXpca3bSH2igjfeYsWeoe7rzo4QTGnwd29Pa924rtXzRjDzSxvkt4UdFd6iiCg") + }); + }); +}); diff --git a/satellite/console/wasm/tests/package.json b/satellite/console/wasm/tests/package.json new file mode 100644 index 000000000..2ca6114e9 --- /dev/null +++ b/satellite/console/wasm/tests/package.json @@ -0,0 +1,12 @@ +{ + "name": "testing-wasm", + "version": "1.0.0", + "scripts": { + "build": "GOOS=js GOARCH=wasm go build -o main.wasm storj.io/storj/satellite/console/wasm", + "pretest": "npm run build", + "test": "jest || true" + }, + "devDependencies": { + "jest": "^23.5.0" + } +} From ae03a0540c095126bdde8548ce3ee29aefc9c908 Mon Sep 17 00:00:00 2001 From: Yingrong Zhao Date: Wed, 9 Sep 2020 18:51:56 -0400 Subject: [PATCH 08/38] pkg/quic: add quic implementation Due to the issues with the licensing and go version requirement by the quic-go library, we can not introduce the library into libuplink at the moment. Therefore, we decided to put our quic integration code into storj/storj. It will unblock us from rolling out quic to storagenodes. 
Change-Id: If9d765da10c45947a4e3a3a11e4679bab69bcf08 --- go.mod | 1 + go.sum | 129 +++++++++++++++++++++++++++ pkg/quic/common.go | 16 ++++ pkg/quic/conn.go | 196 ++++++++++++++++++++++++++++++++++++++++++ pkg/quic/connector.go | 78 +++++++++++++++++ pkg/quic/listener.go | 62 +++++++++++++ 6 files changed, 482 insertions(+) create mode 100644 pkg/quic/common.go create mode 100644 pkg/quic/conn.go create mode 100644 pkg/quic/connector.go create mode 100644 pkg/quic/listener.go diff --git a/go.mod b/go.mod index 8315c41eb..78d8c443a 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( github.com/jackc/pgx/v4 v4.9.0 github.com/jinzhu/now v1.1.1 github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 + github.com/lucas-clemente/quic-go v0.19.3 github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 diff --git a/go.sum b/go.sum index bbd52cbc0..19ac02ae3 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -17,7 +19,12 @@ cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= 
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= @@ -40,6 +47,7 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZp github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.13.3 h1:kohgdtN58KW/r9ZDVmMJE3MrfbumwsDQStd0LPAGmmw= github.com/alicebob/miniredis/v2 v2.13.3/go.mod h1:uS970Sw5Gs9/iK3yBg0l9Uj9s25wXxSpQUE9EaJ/Blg= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -55,6 +63,7 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/blang/semver v3.5.1+incompatible 
h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -68,12 +77,15 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= github.com/calebcase/tmpfile v1.0.2 h1:1AGuhKiUu4J6wxz6lxuF6ck3f8G2kaV6KSEny0RGCig= github.com/calebcase/tmpfile v1.0.2/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod 
h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE= github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -92,6 +104,7 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -125,6 +138,7 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -134,10 +148,16 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -161,12 +181,17 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -178,6 +203,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -200,6 +227,9 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 h1:SRgJV+IoxM5MKyFdlSUeNy6/ycRUF2yBAKdAQswoHUk= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -216,8 +246,10 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34= github.com/graphql-go/graphql v0.7.9/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod 
h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= @@ -303,6 +335,7 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -332,6 +365,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -341,9 +375,18 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -364,6 +407,7 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= 
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -381,22 +425,32 @@ github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8d github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 h1:lh3PyZvY+B9nFliSGTn5uFuqQQJGuNrD0MLCokv09ag= github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= github.com/onsi/ginkgo v1.10.3/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= @@ -409,6 +463,7 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang 
v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -416,9 +471,11 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -429,15 +486,39 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod 
h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod 
h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod 
h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -447,6 +528,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= @@ -487,9 +570,12 @@ github.com/stripe/stripe-go v70.15.0+incompatible h1:hNML7M1zx8RgtepEMlxyu/FpVPr github.com/stripe/stripe-go v70.15.0+incompatible/go.mod h1:A1dQZmO/QypXmsL0T8axYZkSN/uA/T/A64pfKdBAMiY= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= 
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k= github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= @@ -521,6 +607,7 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -544,10 +631,14 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -558,6 +649,7 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -572,6 +664,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1Zcpyg golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -590,6 +683,8 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -598,6 +693,7 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -609,13 +705,19 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod 
h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -630,6 +732,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -638,6 +741,7 @@ golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -648,7 +752,10 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -657,16 +764,19 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc 
h1:y0Og6AYdwus7SIAnKnDxjc4gJetRiYEWOx4AKbOeyEI= golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= @@ -674,6 +784,7 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -705,6 +816,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -716,6 +830,7 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -723,6 +838,10 @@ google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -736,6 +855,8 @@ google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZt google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -753,6 +874,7 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= @@ -775,10 +897,13 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -787,6 +912,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ= storj.io/common v0.0.0-20201207172416-78f4e59925c3/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY= diff --git a/pkg/quic/common.go b/pkg/quic/common.go new file mode 100644 index 000000000..e71b5b76d --- /dev/null +++ b/pkg/quic/common.go @@ -0,0 +1,16 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. + +package quic + +import ( + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" +) + +var ( + mon = monkit.Package() + + // Error is a pkg/quic error. + Error = errs.Class("quic error") +) diff --git a/pkg/quic/conn.go b/pkg/quic/conn.go new file mode 100644 index 000000000..e777627e4 --- /dev/null +++ b/pkg/quic/conn.go @@ -0,0 +1,196 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package quic + +import ( + "context" + "crypto/tls" + "net" + "runtime" + "sync" + "time" + + "github.com/lucas-clemente/quic-go" + + "storj.io/common/memory" + "storj.io/common/rpc" +) + +// Conn is a wrapper around a quic connection and fulfills net.Conn interface. +type Conn struct { + once sync.Once + // The Conn.stream varible should never be directly accessed. + // Always use Conn.getStream() instead. + stream quic.Stream + + acceptErr error + session quic.Session +} + +// Read implements the Conn Read method. +func (c *Conn) Read(b []byte) (n int, err error) { + stream, err := c.getStream() + if err != nil { + return 0, err + } + return stream.Read(b) +} + +// Write implements the Conn Write method. +func (c *Conn) Write(b []byte) (int, error) { + stream, err := c.getStream() + if err != nil { + return 0, err + } + return stream.Write(b) +} + +func (c *Conn) getStream() (quic.Stream, error) { + // Outgoing connections `stream` gets set when the Conn is initialized. + // It's only with incoming connections that `stream == nil` and this + // AcceptStream() code happens. + if c.stream == nil { + // When this function completes, it guarantees either c.acceptErr is not nil or c.stream is not nil + c.once.Do(func() { + stream, err := c.session.AcceptStream(context.Background()) + if err != nil { + c.acceptErr = err + return + } + + c.stream = stream + }) + if c.acceptErr != nil { + return nil, c.acceptErr + } + } + + return c.stream, nil +} + +// ConnectionState converts quic session state to tls connection state and returns tls state. +func (c *Conn) ConnectionState() tls.ConnectionState { + return c.session.ConnectionState().ConnectionState +} + +// Close closes the quic connection. +func (c *Conn) Close() error { + return c.session.CloseWithError(quic.ErrorCode(0), "") +} + +// LocalAddr returns the local address. +func (c *Conn) LocalAddr() net.Addr { + return c.session.LocalAddr() +} + +// RemoteAddr returns the address of the peer. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.session.RemoteAddr() +} + +// SetReadDeadline sets the deadline for future Read calls +// and any currently-blocked Read call. +func (c *Conn) SetReadDeadline(t time.Time) error { + stream, err := c.getStream() + if err != nil { + return err + } + return stream.SetReadDeadline(t) +} + +// SetWriteDeadline sets the deadline for future Write calls +// and any currently-blocked Write call. +func (c *Conn) SetWriteDeadline(t time.Time) error { + stream, err := c.getStream() + if err != nil { + return err + } + return stream.SetWriteDeadline(t) +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. It is equivalent to calling both +// SetReadDeadline and SetWriteDeadline. +func (c *Conn) SetDeadline(t time.Time) error { + stream, err := c.getStream() + if err != nil { + return err + } + + return stream.SetDeadline(t) +} + +// +// timed conns +// + +// timedConn wraps a rpc.ConnectorConn so that all reads and writes get the specified timeout and +// return bytes no faster than the rate. If the timeout or rate are zero, they are +// ignored. +type timedConn struct { + rpc.ConnectorConn + rate memory.Size +} + +// now returns time.Now if there's a nonzero rate. +func (t *timedConn) now() (now time.Time) { + if t.rate > 0 { + now = time.Now() + } + return now +} + +// delay ensures that we sleep to keep the rate if it is nonzero. n is the number of +// bytes in the read or write operation we need to delay. +func (t *timedConn) delay(start time.Time, n int) { + if t.rate > 0 { + expected := time.Duration(n * int(time.Second) / t.rate.Int()) + if actual := time.Since(start); expected > actual { + time.Sleep(expected - actual) + } + } +} + +// Read wraps the connection read and adds sleeping to ensure the rate. 
+func (t *timedConn) Read(p []byte) (int, error) { + start := t.now() + n, err := t.ConnectorConn.Read(p) + t.delay(start, n) + return n, err +} + +// Write wraps the connection write and adds sleeping to ensure the rate. +func (t *timedConn) Write(p []byte) (int, error) { + start := t.now() + n, err := t.ConnectorConn.Write(p) + t.delay(start, n) + return n, err +} + +// closeTrackingConn wraps a rpc.ConnectorConn and keeps track of if it was closed +// or if it was leaked (and closes it if it was leaked). +type closeTrackingConn struct { + rpc.ConnectorConn +} + +// trackClose wraps the conn and sets a finalizer on the returned value to +// close the conn and monitor that it was leaked. +func trackClose(conn rpc.ConnectorConn) rpc.ConnectorConn { + tracked := &closeTrackingConn{ConnectorConn: conn} + runtime.SetFinalizer(tracked, (*closeTrackingConn).finalize) + return tracked +} + +// Close clears the finalizer and closes the connection. +func (c *closeTrackingConn) Close() error { + runtime.SetFinalizer(c, nil) + mon.Event("quic_connection_closed") + return c.ConnectorConn.Close() +} + +// finalize monitors that a connection was leaked and closes the connection. +func (c *closeTrackingConn) finalize() { + mon.Event("quic_connection_leaked") + _ = c.ConnectorConn.Close() +} diff --git a/pkg/quic/connector.go b/pkg/quic/connector.go new file mode 100644 index 000000000..26c73fa93 --- /dev/null +++ b/pkg/quic/connector.go @@ -0,0 +1,78 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. + +package quic + +import ( + "context" + "crypto/tls" + "time" + + "github.com/lucas-clemente/quic-go" + + "storj.io/common/memory" + "storj.io/common/peertls/tlsopts" + "storj.io/common/rpc" +) + +// Connector implements a dialer that creates a quic connection. +type Connector struct { + transferRate memory.Size + + config *quic.Config +} + +// NewDefaultConnector instantiates a new instance of Connector. 
+// If no quic configuration is provided, default value will be used. +func NewDefaultConnector(quicConfig *quic.Config) Connector { + if quicConfig == nil { + quicConfig = &quic.Config{ + MaxIdleTimeout: 15 * time.Minute, + } + } + return Connector{ + config: quicConfig, + } +} + +// DialContext creates a quic connection. +func (c Connector) DialContext(ctx context.Context, tlsConfig *tls.Config, address string) (_ rpc.ConnectorConn, err error) { + defer mon.Task()(&ctx)(&err) + + if tlsConfig == nil { + return nil, Error.New("tls config is not set") + } + tlsConfigCopy := tlsConfig.Clone() + tlsConfigCopy.NextProtos = []string{tlsopts.StorjApplicationProtocol} + + sess, err := quic.DialAddrContext(ctx, address, tlsConfigCopy, c.config) + if err != nil { + return nil, Error.Wrap(err) + } + + stream, err := sess.OpenStreamSync(ctx) + if err != nil { + return nil, Error.Wrap(err) + } + + conn := &Conn{ + session: sess, + stream: stream, + } + + return &timedConn{ + ConnectorConn: trackClose(conn), + rate: c.transferRate, + }, nil +} + +// SetTransferRate returns a QUIC connector with the given transfer rate. +func (c Connector) SetTransferRate(rate memory.Size) Connector { + c.transferRate = rate + return c +} + +// TransferRate returns the transfer rate set on the connector. +func (c Connector) TransferRate() memory.Size { + return c.transferRate +} diff --git a/pkg/quic/listener.go b/pkg/quic/listener.go new file mode 100644 index 000000000..7eeeab1a6 --- /dev/null +++ b/pkg/quic/listener.go @@ -0,0 +1,62 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. + +package quic + +import ( + "context" + "crypto/tls" + "net" + + "github.com/lucas-clemente/quic-go" + + "storj.io/common/peertls/tlsopts" +) + +// Listener implements listener for QUIC. +type Listener struct { + listener quic.Listener +} + +// NewListener returns a new listener instance for QUIC. +// The quic.Config may be nil, in that case the default values will be used. 
+// if the provided context is closed, all existing or following Accept calls will return an error. +func NewListener(tlsConfig *tls.Config, address string, quicConfig *quic.Config) (net.Listener, error) { + if tlsConfig == nil { + return nil, Error.New("tls config is not set") + } + tlsConfigCopy := tlsConfig.Clone() + tlsConfigCopy.NextProtos = []string{tlsopts.StorjApplicationProtocol} + + listener, err := quic.ListenAddr(address, tlsConfigCopy, quicConfig) + if err != nil { + return nil, err + } + + return &Listener{ + listener: listener, + }, nil +} + +// Accept waits for and returns the next available quic session to the listener. +func (l *Listener) Accept() (net.Conn, error) { + ctx := context.Background() + session, err := l.listener.Accept(ctx) + if err != nil { + return nil, err + } + + return &Conn{ + session: session, + }, nil +} + +// Close closes the QUIC listener. +func (l *Listener) Close() error { + return l.listener.Close() +} + +// Addr returns the local network addr that the server is listening on. +func (l *Listener) Addr() net.Addr { + return l.listener.Addr() +} From c44368c489c3aac740df42f5a8af62a5aa115baf Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Fri, 22 Jan 2021 14:08:39 +0200 Subject: [PATCH 09/38] cmd/storj-sim: fix port assignment for multiple satellites Currently first satellite GC would've conflicted with second satellites public RPC port. Instead assign "satellite workers" a new peer class. Change-Id: Id6bdaa17243556482e88da708c5147149788f6be --- cmd/storj-sim/network.go | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/cmd/storj-sim/network.go b/cmd/storj-sim/network.go index 157cc1f6b..f6a58311f 100644 --- a/cmd/storj-sim/network.go +++ b/cmd/storj-sim/network.go @@ -51,10 +51,11 @@ const ( // to create a port with a consistent format for storj-sim services. // Peer classes. 
- satellitePeer = 0 - gatewayPeer = 1 - versioncontrolPeer = 2 - storagenodePeer = 3 + satellitePeer = 0 + satellitePeerWorker = 4 + gatewayPeer = 1 + versioncontrolPeer = 2 + storagenodePeer = 3 // Endpoints. publicRPC = 0 @@ -64,12 +65,15 @@ const ( debugHTTP = 9 // Satellite specific constants. - redisPort = 4 - adminHTTP = 5 - debugAdminHTTP = 6 - debugPeerHTTP = 7 - debugRepairerHTTP = 8 - debugGCHTTP = 10 + redisPort = 4 + adminHTTP = 5 + debugAdminHTTP = 6 + debugCoreHTTP = 7 + + // Satellite worker specific constants. + debugMigrationHTTP = 0 + debugRepairerHTTP = 1 + debugGCHTTP = 2 ) // port creates a port with a consistent format for storj-sim services. @@ -394,7 +398,7 @@ func newNetwork(flags *Flags) (*Processes, error) { migrationProcess.Arguments = withCommon(apiProcess.Directory, Arguments{ "run": { "migration", - "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)), + "--debug.addr", net.JoinHostPort(host, port(satellitePeerWorker, i, debugMigrationHTTP)), }, }) apiProcess.WaitForExited(migrationProcess) @@ -407,7 +411,7 @@ func newNetwork(flags *Flags) (*Processes, error) { }) coreProcess.Arguments = withCommon(apiProcess.Directory, Arguments{ "run": { - "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)), + "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugCoreHTTP)), "--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000", }, }) @@ -435,7 +439,7 @@ func newNetwork(flags *Flags) (*Processes, error) { repairProcess.Arguments = withCommon(apiProcess.Directory, Arguments{ "run": { "repair", - "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugRepairerHTTP)), + "--debug.addr", net.JoinHostPort(host, port(satellitePeerWorker, i, debugRepairerHTTP)), "--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000", }, }) @@ -449,7 +453,7 @@ func newNetwork(flags 
*Flags) (*Processes, error) { garbageCollectionProcess.Arguments = withCommon(apiProcess.Directory, Arguments{ "run": { "garbage-collection", - "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugGCHTTP)), + "--debug.addr", net.JoinHostPort(host, port(satellitePeerWorker, i, debugGCHTTP)), }, }) garbageCollectionProcess.WaitForExited(migrationProcess) From 49c8e94480c1e7f63d7cab2e362b3c4027d56d87 Mon Sep 17 00:00:00 2001 From: Ivan Fraixedes Date: Wed, 16 Dec 2020 19:38:01 +0100 Subject: [PATCH 10/38] scripts: Add test Satellite working w/o Redis Create a storj-sim test that checks that uplinks operations works when satellite runs and can connect to Redis and when it cannot connect to simulate a Redis downtime. Also verifies that the satellite can start despite of Redis being downtime. This test currently doesn't pass and it will be the one used to verify the work that has to be done to make sure that the satellite allow the clients to perform their operations despite of Redis being unavailable. We require these changes before we deploy any customer face satellite on a multi-region architecture. NOTE that this test will be added later on to Jenkins to run this test every time that we apply changes and at that time we'll see if it has to be adjusted for being able to run on Jenkins because as it's now it may not work because the scripts start and stop a Redis docker container. 
Change-Id: I22acb22f0ca594583e36b45c88f8c03bac73b329 --- scripts/test-sim-redis-up-and-down-dev.sh | 8 ++ scripts/test-sim-redis-up-and-down.sh | 72 +++++++++++ scripts/test-uplink-redis-up-and-down.sh | 150 ++++++++++++++++++++++ 3 files changed, 230 insertions(+) create mode 100755 scripts/test-sim-redis-up-and-down-dev.sh create mode 100755 scripts/test-sim-redis-up-and-down.sh create mode 100755 scripts/test-uplink-redis-up-and-down.sh diff --git a/scripts/test-sim-redis-up-and-down-dev.sh b/scripts/test-sim-redis-up-and-down-dev.sh new file mode 100755 index 000000000..724061add --- /dev/null +++ b/scripts/test-sim-redis-up-and-down-dev.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +# shellcheck source=/postgres-dev.sh +source "${SCRIPTDIR}/postgres-dev.sh" + +"${SCRIPTDIR}/test-sim-redis-up-and-down.sh" diff --git a/scripts/test-sim-redis-up-and-down.sh b/scripts/test-sim-redis-up-and-down.sh new file mode 100755 index 000000000..f0e4e504a --- /dev/null +++ b/scripts/test-sim-redis-up-and-down.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +set -Eeuo pipefail +set +x + +# Required environment variables +if [ -z "${STORJ_SIM_POSTGRES}" ]; then + echo "STORJ_SIM_POSTGRES environment variable must be set to a non-empty string" + exit 1 +fi + +# constants +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +readonly SCRIPT_DIR +REDIS_CONTAINER_NAME=storj_sim_redis +readonly REDIS_CONTAINER_NAME +TMP_DIR=$(mktemp -d -t tmp.XXXXXXXXXX) +readonly TMP_DIR + +# setup tmpdir for testfiles and cleanup +cleanup() { + trap - EXIT + + rm -rf "${TMP_DIR}" + docker container rm -f "${REDIS_CONTAINER_NAME}" >/dev/null 2>&1 || true +} +trap cleanup EXIT + +echo "install sim" +make -C "$SCRIPT_DIR"/.. 
install-sim + +echo "overriding default max segment size to 6MiB" +GOBIN="${TMP_DIR}" go install -v -ldflags "-X 'storj.io/uplink.maxSegmentSize=6MiB'" storj.io/storj/cmd/uplink + +# use modified version of uplink +export PATH="${TMP_DIR}:${PATH}" +export STORJ_NETWORK_DIR="${TMP_DIR}" + +STORJ_NETWORK_HOST4=${STORJ_NETWORK_HOST4:-127.0.0.1} + +redis_run() { + local retries=10 + + docker container run -d -p 6379:6379 --name "${REDIS_CONTAINER_NAME}" redis:5.0-alpine + until docker container exec "${REDIS_CONTAINER_NAME}" redis-cli ping >/dev/null 2>&1 || + [ ${retries} -eq 0 ]; do + echo "waiting for Redis server to be ready, $((retries--)) remaining attemps..." + sleep 1 + done + + if [ ${retries} -eq 0 ]; then + echo "aborting, Redis server is not ready after several retrials" + exit 1 + fi +} + +redis_stop() { + docker container stop "${REDIS_CONTAINER_NAME}" +} + +# setup the network +storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \ + --postgres="${STORJ_SIM_POSTGRES}" --redis="127.0.0.1:6379" setup + +# run test that checks that the satellite runs when Redis is up and down +redis_run +storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \ + --redis="127.0.0.1:6379" test bash "${SCRIPT_DIR}/test-uplink-redis-up-and-down.sh" "${REDIS_CONTAINER_NAME}" + +# run test that checks that the satellite runs despite of not being able to connect to Redis +redis_stop +storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \ + --redis="127.0.0.1:6379" test bash "${SCRIPT_DIR}/test-uplink.sh" diff --git a/scripts/test-uplink-redis-up-and-down.sh b/scripts/test-uplink-redis-up-and-down.sh new file mode 100755 index 000000000..2f32e3c5a --- /dev/null +++ b/scripts/test-uplink-redis-up-and-down.sh @@ -0,0 +1,150 @@ +#!/usr/bin/env bash +set -ueo pipefail + +redis_container_name="${1-}" + +# Required positional arguments +if [ -z "${redis_container_name}" ]; then + echo "redis container name is 
required as a first positional script argument" + exit 1 +fi + +# constants +BUCKET="bucket-123" +readonly BUCKET +UPLINK_DEBUG_ADDR="" +readonly UPLINK_DEBUG_ADDR + +export STORJ_ACCESS="${GATEWAY_0_ACCESS}" +export STORJ_DEBUG_ADDR="${UPLINK_DEBUG_ADDR}" + +# Vars +temp_dirs=() # used to track all the created temporary directories + +cleanup() { + trap - EXIT + + rm -rf "${temp_dirs[@]}" + echo "cleaned up test successfully" +} +trap cleanup EXIT + +random_bytes_file() { + size="${1}" + output="${2}" + head -c "${size}" "${output}" +} + +compare_files() { + name=$(basename "${2}") + if cmp "${1}" "${2}"; then + echo "${name} matches uploaded file" + else + echo "${name} does not match uploaded file" + exit 1 + fi +} + +redis_start() { + docker container start "${redis_container_name}" +} + +redis_stop() { + docker container stop "${redis_container_name}" +} + +uplink_test() { + local temp_dir + temp_dir=$(mktemp -d -t tmp.XXXXXXXXXX) + temp_dirs+=("${temp_dir}") + + local src_dir="${temp_dir}/source" + local dst_dir="${temp_dir}/dst" + mkdir -p "${src_dir}" "${dst_dir}" + + local uplink_dir="${temp_dir}/uplink" + + random_bytes_file "2KiB" "${src_dir}/small-upload-testfile" # create 2KiB file of random bytes (inline) + random_bytes_file "5MiB" "${src_dir}/big-upload-testfile" # create 5MiB file of random bytes (remote) + # this is special case where we need to test at least one remote segment and inline segment of exact size 0 + random_bytes_file "12MiB" "${src_dir}/multisegment-upload-testfile" # create 12MiB file of random bytes (1 remote segments + inline) + random_bytes_file "13MiB" "${src_dir}/diff-size-segments" # create 13MiB file of random bytes (2 remote segments) + + random_bytes_file "100KiB" "${src_dir}/put-file" # create 100KiB file of random bytes (remote) + + uplink mb "sj://$BUCKET/" + uplink cp "${src_dir}/small-upload-testfile" "sj://$BUCKET/" --progress=false + uplink cp "${src_dir}/big-upload-testfile" "sj://$BUCKET/" --progress=false + uplink 
cp "${src_dir}/multisegment-upload-testfile" "sj://$BUCKET/" --progress=false + uplink cp "${src_dir}/diff-size-segments" "sj://$BUCKET/" --progress=false + + uplink <"${src_dir}/put-file" put "sj://$BUCKET/put-file" + + uplink --config-dir "${uplink_dir}" import named-access "${STORJ_ACCESS}" + + local files + files=$(STORJ_ACCESS='' uplink --config-dir "${uplink_dir}" --access named-access \ + ls "sj://${BUCKET}" | tee "${temp_dir}/list" | wc -l) + local expected_files="5" + if [ "${files}" == "${expected_files}" ]; then + echo "listing returns ${files} files" + else + echo "listing returns ${files} files but want ${expected_files}" + exit 1 + fi + + local size_check + size_check=$(awk <"${temp_dir}/list" '{if($4 == "0") print "invalid size";}') + if [ "${size_check}" != "" ]; then + echo "listing returns invalid size for one of the objects:" + cat "${temp_dir}/list" + exit 1 + fi + + uplink ls "sj://$BUCKET/non-existing-prefix" + + uplink cp "sj://$BUCKET/small-upload-testfile" "${dst_dir}" --progress=false + uplink cp "sj://$BUCKET/big-upload-testfile" "${dst_dir}" --progress=false + uplink cp "sj://$BUCKET/multisegment-upload-testfile" "${dst_dir}" --progress=false + uplink cp "sj://$BUCKET/diff-size-segments" "${dst_dir}" --progress=false + uplink cp "sj://$BUCKET/put-file" "${dst_dir}" --progress=false + uplink cat "sj://$BUCKET/put-file" >>"${dst_dir}/put-file-from-cat" + + uplink rm "sj://$BUCKET/small-upload-testfile" + uplink rm "sj://$BUCKET/big-upload-testfile" + uplink rm "sj://$BUCKET/multisegment-upload-testfile" + uplink rm "sj://$BUCKET/diff-size-segments" + uplink rm "sj://$BUCKET/put-file" + + uplink ls "sj://$BUCKET" + + uplink rb "sj://$BUCKET" + + compare_files "${src_dir}/small-upload-testfile" "${dst_dir}/small-upload-testfile" + compare_files "${src_dir}/big-upload-testfile" "${dst_dir}/big-upload-testfile" + compare_files "${src_dir}/multisegment-upload-testfile" "${dst_dir}/multisegment-upload-testfile" + compare_files 
"${src_dir}/diff-size-segments" "${dst_dir}/diff-size-segments" + compare_files "${src_dir}/put-file" "${dst_dir}/put-file" + compare_files "${src_dir}/put-file" "${dst_dir}/put-file-from-cat" + + # test deleting non empty bucket with --force flag + uplink mb "sj://$BUCKET/" + + for i in $(seq -w 1 16); do + uplink cp "${src_dir}/small-upload-testfile" "sj://$BUCKET/small-file-$i" --progress=false + done + + uplink rb "sj://$BUCKET" --force + + if [ "$(uplink ls | grep -c "No buckets")" = "0" ]; then + echo "uplink didn't remove the entire bucket with the 'force' flag" + exit 1 + fi +} + +# Run the test with Redis container running +uplink_test + +# Run the test with Redis container not running +redis_stop +uplink_test From c139cbd76b18fa3d9bc32cacd66ab5f2bafc0b3f Mon Sep 17 00:00:00 2001 From: Qweder93 Date: Fri, 22 Jan 2021 15:06:59 +0200 Subject: [PATCH 11/38] storagenode/payouts: fix CurrentMonthExpectations timezone handling. Estimations based on node's join date. On servers with non-UTC it would have calculated a different month boundary. If node joined in current month calculations will be related on amount of days node've been working. 
Change-Id: Ie572b197f50c6cdff5a044a53dfb5b9138f82f24 --- go.mod | 1 - go.sum | 2 - private/date/utils.go | 7 ++++ .../estimatedpayouts/estimatedpayouts.go | 10 +++++ .../estimatedpayouts/estimatedpayouts_test.go | 37 +++++++++++++++++++ .../payouts/estimatedpayouts/service.go | 27 +++++++++----- storagenode/peer.go | 9 +++++ 7 files changed, 80 insertions(+), 13 deletions(-) create mode 100644 storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go diff --git a/go.mod b/go.mod index 78d8c443a..fe50f38f2 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/jackc/pgconn v1.7.0 github.com/jackc/pgtype v1.5.0 github.com/jackc/pgx/v4 v4.9.0 - github.com/jinzhu/now v1.1.1 github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 github.com/lucas-clemente/quic-go v0.19.3 github.com/mattn/go-sqlite3 v2.0.3+incompatible diff --git a/go.sum b/go.sum index 19ac02ae3..c8e0fadeb 100644 --- a/go.sum +++ b/go.sum @@ -337,8 +337,6 @@ github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= diff --git a/private/date/utils.go b/private/date/utils.go index d1e29e224..a33693868 100644 --- a/private/date/utils.go +++ b/private/date/utils.go @@ -51,3 
+51,10 @@ func MonthsBetweenDates(from time.Time, to time.Time) int { func TruncateToHourInNano(t time.Time) int64 { return t.Truncate(1 * time.Hour).UnixNano() } + +// UTCEndOfMonth returns utc end of month (f.e. to get last day in month). +func UTCEndOfMonth(now time.Time) time.Time { + now = now.UTC() + y, m, _ := now.Date() + return time.Date(y, m+1, 1, 0, 0, 0, 0, &time.Location{}).Add(-time.Nanosecond) +} diff --git a/storagenode/payouts/estimatedpayouts/estimatedpayouts.go b/storagenode/payouts/estimatedpayouts/estimatedpayouts.go index 8c6b592bb..7862577a0 100644 --- a/storagenode/payouts/estimatedpayouts/estimatedpayouts.go +++ b/storagenode/payouts/estimatedpayouts/estimatedpayouts.go @@ -5,6 +5,9 @@ package estimatedpayouts import ( "math" + "time" + + "storj.io/storj/private/date" ) // EstimatedPayout contains usage and estimated payouts data for current and previous months. @@ -61,3 +64,10 @@ func (pm *PayoutMonthly) SetPayout() { func RoundFloat(value float64) float64 { return math.Round(value*100) / 100 } + +// SetExpectedMonth set current month expectations. +func (estimatedPayout *EstimatedPayout) SetExpectedMonth(now time.Time) { + daysPaste := float64(now.Day() - 1) + timeInMonth := date.UTCEndOfMonth(now) + estimatedPayout.CurrentMonthExpectations = (estimatedPayout.CurrentMonth.Payout / daysPaste) * float64(timeInMonth.Day()) +} diff --git a/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go b/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go new file mode 100644 index 000000000..f81cc8fd9 --- /dev/null +++ b/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go @@ -0,0 +1,37 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package estimatedpayouts_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "storj.io/common/testcontext" + "storj.io/storj/private/testplanet" + "storj.io/storj/storagenode/payouts/estimatedpayouts" +) + +func TestCurrentMonthExpectations(t *testing.T) { + testplanet.Run(t, testplanet.Config{ + StorageNodeCount: 1, + SatelliteCount: 2, + }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) { + estimatedPayout := estimatedpayouts.EstimatedPayout{ + CurrentMonth: estimatedpayouts.PayoutMonthly{ + Payout: 100, + }, + } + + currentDay := time.Now().Day() - 1 + now := time.Now().UTC() + y, m, _ := now.Date() + daysInMonth := time.Date(y, m+1, 1, 0, 0, 0, -1, &time.Location{}).Day() + + expectations := (estimatedPayout.CurrentMonth.Payout / float64(currentDay)) * float64(daysInMonth) + estimatedPayout.SetExpectedMonth(now) + require.Equal(t, estimatedPayout.CurrentMonthExpectations, expectations) + }) +} diff --git a/storagenode/payouts/estimatedpayouts/service.go b/storagenode/payouts/estimatedpayouts/service.go index 4222f1bae..530c0c24c 100644 --- a/storagenode/payouts/estimatedpayouts/service.go +++ b/storagenode/payouts/estimatedpayouts/service.go @@ -7,7 +7,6 @@ import ( "context" "time" - "github.com/jinzhu/now" "github.com/spacemonkeygo/monkit/v3" "github.com/zeebo/errs" @@ -57,6 +56,7 @@ func NewService(bandwidthDB bandwidth.DB, reputationDB reputation.DB, storageUsa func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID) (payout EstimatedPayout, err error) { defer mon.Task()(&ctx)(&err) + now := time.Now() currentMonthPayout, previousMonthPayout, err := s.estimatedPayout(ctx, satelliteID) if err != nil { return EstimatedPayout{}, EstimationServiceErr.Wrap(err) @@ -64,14 +64,27 @@ func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID s payout.CurrentMonth = currentMonthPayout payout.PreviousMonth = previousMonthPayout - 
payout.setExpectations(ctx) + stats, err := s.reputationDB.Get(ctx, satelliteID) + if err != nil { + return EstimatedPayout{}, EstimationServiceErr.Wrap(err) + } + + daysSinceJoined := time.Since(stats.JoinedAt).Hours() / 24 + if daysSinceJoined >= float64(now.Day()) { + payout.SetExpectedMonth(now) + + return payout, nil + } + + payout.CurrentMonthExpectations = (payout.CurrentMonth.Payout / daysSinceJoined) * float64(date.UTCEndOfMonth(now).Day()) return payout, nil } // GetAllSatellitesEstimatedPayout returns estimated payouts for current and previous months from all satellites with current level of load. func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout EstimatedPayout, err error) { defer mon.Task()(&ctx)(&err) + now := time.Now() satelliteIDs := s.trust.GetSatellites(ctx) for i := 0; i < len(satelliteIDs); i++ { @@ -97,18 +110,12 @@ func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout E payout.PreviousMonth.EgressRepairAudit += previous.EgressRepairAudit payout.PreviousMonth.Held += previous.Held } - payout.setExpectations(ctx) + + payout.SetExpectedMonth(now) return payout, nil } -// setExpectations set current month expectations. -func (estimatedPayout *EstimatedPayout) setExpectations(ctx context.Context) { - daysPaste := float64(time.Now().Day() - 1) - DaysInMonth := float64(now.EndOfMonth().Day()) - estimatedPayout.CurrentMonthExpectations = (estimatedPayout.CurrentMonth.Payout / daysPaste) * DaysInMonth -} - // estimatedPayout returns estimated payouts data for current and previous months from specific satellite. 
func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID) (currentMonthPayout PayoutMonthly, previousMonthPayout PayoutMonthly, err error) { defer mon.Task()(&ctx)(&err) diff --git a/storagenode/peer.go b/storagenode/peer.go index 2434f5bb6..6f2e04a49 100644 --- a/storagenode/peer.go +++ b/storagenode/peer.go @@ -286,6 +286,7 @@ type Peer struct { Storage *multinode.StorageEndpoint Bandwidth *multinode.BandwidthEndpoint Node *multinode.NodeEndpoint + Payout *multinode.PayoutEndpoint } } @@ -799,6 +800,11 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten peer.DB.Reputation(), peer.Storage2.Trust) + peer.Multinode.Payout = multinode.NewPayoutEndpoint( + peer.Log.Named("multinode:payout-endpoint"), + apiKeys, + peer.DB.Payout()) + if err = multinodepb.DRPCRegisterStorage(peer.Server.DRPC(), peer.Multinode.Storage); err != nil { return nil, errs.Combine(err, peer.Close()) } @@ -808,6 +814,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten if err = multinodepb.DRPCRegisterNode(peer.Server.DRPC(), peer.Multinode.Node); err != nil { return nil, errs.Combine(err, peer.Close()) } + if err = multinodepb.DRPCRegisterPayout(peer.Server.DRPC(), peer.Multinode.Payout); err != nil { + return nil, errs.Combine(err, peer.Close()) + } } return peer, nil From 8263f18321d2cee27a124cdb2864046cd6a421f7 Mon Sep 17 00:00:00 2001 From: Moby von Briesen Date: Thu, 21 Jan 2021 13:19:37 -0500 Subject: [PATCH 12/38] satellite/console: Add graphql query for owned projects Change-Id: If47183d46cb7552ecdddbb3e536c36d958fad6d0 --- .../console/consoleweb/consoleql/project.go | 62 ++++++++++++++++++- .../console/consoleweb/consoleql/query.go | 15 +++++ .../consoleweb/consoleql/query_test.go | 53 ++++++++++++++++ .../consoleweb/consoleql/typecreator.go | 12 ++++ satellite/console/service.go | 16 +++++ 5 files changed, 157 insertions(+), 1 deletion(-) diff --git 
a/satellite/console/consoleweb/consoleql/project.go b/satellite/console/consoleweb/consoleql/project.go
index 908b492ba..4c0229bc2 100644
--- a/satellite/console/consoleweb/consoleql/project.go
+++ b/satellite/console/consoleweb/consoleql/project.go
@@ -19,6 +19,10 @@ const (
 	ProjectInputType = "projectInput"
 	// ProjectUsageType is a graphql type name for project usage.
 	ProjectUsageType = "projectUsage"
+	// ProjectsCursorInputType is a graphql input type name for projects cursor.
+	ProjectsCursorInputType = "projectsCursor"
+	// ProjectsPageType is a graphql type name for projects page.
+	ProjectsPageType = "projectsPage"
 	// BucketUsageCursorInputType is a graphql input
 	// type name for bucket usage cursor.
 	BucketUsageCursorInputType = "bucketUsageCursor"
@@ -62,6 +66,10 @@ const (
 	FieldCurrentPage = "currentPage"
 	// FieldTotalCount is a field name for bucket usage count total.
 	FieldTotalCount = "totalCount"
+	// FieldMemberCount is a field name for number of project members.
+	FieldMemberCount = "memberCount"
+	// FieldProjects is a field name for projects.
+	FieldProjects = "projects"
 	// FieldProjectMembers is a field name for project members.
 	FieldProjectMembers = "projectMembers"
 	// CursorArg is an argument name for cursor.
@@ -104,6 +112,9 @@ func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Objec
 			FieldCreatedAt: &graphql.Field{
 				Type: graphql.DateTime,
 			},
+			FieldMemberCount: &graphql.Field{
+				Type: graphql.Int,
+			},
 			FieldMembers: &graphql.Field{
 				Type: types.projectMemberPage,
 				Args: graphql.FieldConfigArgument{
@@ -255,6 +266,21 @@ func graphqlProjectInput() *graphql.InputObject {
 	})
 }
 
+// graphqlProjectsCursor creates projects cursor graphql input type.
+func graphqlProjectsCursor() *graphql.InputObject { + return graphql.NewInputObject(graphql.InputObjectConfig{ + Name: ProjectsCursorInputType, + Fields: graphql.InputObjectConfigFieldMap{ + LimitArg: &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Int), + }, + PageArg: &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Int), + }, + }, + }) +} + // graphqlBucketUsageCursor creates bucket usage cursor graphql input type. func graphqlBucketUsageCursor() *graphql.InputObject { return graphql.NewInputObject(graphql.InputObjectConfig{ @@ -300,6 +326,33 @@ func graphqlBucketUsage() *graphql.Object { }) } +// graphqlProjectsPage creates a projects page graphql object. +func graphqlProjectsPage(types *TypeCreator) *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Name: ProjectsPageType, + Fields: graphql.Fields{ + FieldProjects: &graphql.Field{ + Type: graphql.NewList(types.project), + }, + LimitArg: &graphql.Field{ + Type: graphql.Int, + }, + OffsetArg: &graphql.Field{ + Type: graphql.Int, + }, + FieldPageCount: &graphql.Field{ + Type: graphql.Int, + }, + FieldCurrentPage: &graphql.Field{ + Type: graphql.Int, + }, + FieldTotalCount: &graphql.Field{ + Type: graphql.Int, + }, + }, + }) +} + // graphqlBucketUsagePage creates bucket usage page graphql object. func graphqlBucketUsagePage(types *TypeCreator) *graphql.Object { return graphql.NewObject(graphql.ObjectConfig{ @@ -362,7 +415,14 @@ func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInf return } -// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args. +// fromMapProjectsCursor creates console.ProjectsCursor from input args. +func fromMapProjectsCursor(args map[string]interface{}) (cursor console.ProjectsCursor) { + cursor.Limit = args[LimitArg].(int) + cursor.Page = args[PageArg].(int) + return +} + +// fromMapBucketUsageCursor creates accounting.BucketUsageCursor from input args. 
func fromMapBucketUsageCursor(args map[string]interface{}) (cursor accounting.BucketUsageCursor) { limit, _ := args[LimitArg].(int) page, _ := args[PageArg].(int) diff --git a/satellite/console/consoleweb/consoleql/query.go b/satellite/console/consoleweb/consoleql/query.go index 24c698805..ec83aa442 100644 --- a/satellite/console/consoleweb/consoleql/query.go +++ b/satellite/console/consoleweb/consoleql/query.go @@ -17,6 +17,8 @@ const ( Query = "query" // ProjectQuery is a query name for project. ProjectQuery = "project" + // OwnedProjectsQuery is a query name for projects owned by an account. + OwnedProjectsQuery = "ownedProjects" // MyProjectsQuery is a query name for projects related to account. MyProjectsQuery = "myProjects" // ActiveRewardQuery is a query name for current active reward offer. @@ -53,6 +55,19 @@ func rootQuery(service *console.Service, mailService *mailservice.Service, types return project, nil }, }, + OwnedProjectsQuery: &graphql.Field{ + Type: types.projectsPage, + Args: graphql.FieldConfigArgument{ + CursorArg: &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(types.projectsCursor), + }, + }, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + cursor := fromMapProjectsCursor(p.Args[CursorArg].(map[string]interface{})) + page, err := service.GetUsersOwnedProjectsPage(p.Context, cursor) + return page, err + }, + }, MyProjectsQuery: &graphql.Field{ Type: graphql.NewList(types.project), Resolve: func(p graphql.ResolveParams) (interface{}, error) { diff --git a/satellite/console/consoleweb/consoleql/query_test.go b/satellite/console/consoleweb/consoleql/query_test.go index 0d4eb81df..b39d85677 100644 --- a/satellite/console/consoleweb/consoleql/query_test.go +++ b/satellite/console/consoleweb/consoleql/query_test.go @@ -409,6 +409,59 @@ func TestGraphqlQuery(t *testing.T) { } } + assert.True(t, foundProj1) + assert.True(t, foundProj2) + }) + t.Run("OwnedProjects query", func(t *testing.T) { + query := fmt.Sprintf( + "query 
{ownedProjects( cursor: { limit: %d, page: %d } ) {projects{id, name, ownerId, description, createdAt, memberCount}, limit, offset, pageCount, currentPage, totalCount } }", + 5, + 1, + ) + + result := testQuery(t, query) + + data := result.(map[string]interface{}) + projectsPage := data[consoleql.OwnedProjectsQuery].(map[string]interface{}) + + projectsList := projectsPage[consoleql.FieldProjects].([]interface{}) + assert.Len(t, projectsList, 2) + + assert.EqualValues(t, 1, projectsPage[consoleql.FieldCurrentPage]) + assert.EqualValues(t, 0, projectsPage[consoleql.OffsetArg]) + assert.EqualValues(t, 5, projectsPage[consoleql.LimitArg]) + assert.EqualValues(t, 1, projectsPage[consoleql.FieldPageCount]) + assert.EqualValues(t, 2, projectsPage[consoleql.FieldTotalCount]) + + testProject := func(t *testing.T, actual map[string]interface{}, expected *console.Project, expectedNumMembers int) { + assert.Equal(t, expected.Name, actual[consoleql.FieldName]) + assert.Equal(t, expected.Description, actual[consoleql.FieldDescription]) + + createdAt := time.Time{} + err := createdAt.UnmarshalText([]byte(actual[consoleql.FieldCreatedAt].(string))) + + assert.NoError(t, err) + assert.True(t, expected.CreatedAt.Equal(createdAt)) + + assert.EqualValues(t, expectedNumMembers, actual[consoleql.FieldMemberCount]) + } + + var foundProj1, foundProj2 bool + + for _, entry := range projectsList { + project := entry.(map[string]interface{}) + + id := project[consoleql.FieldID].(string) + switch id { + case createdProject.ID.String(): + foundProj1 = true + testProject(t, project, createdProject, 3) + case project2.ID.String(): + foundProj2 = true + testProject(t, project, project2, 1) + } + } + assert.True(t, foundProj1) assert.True(t, foundProj2) }) diff --git a/satellite/console/consoleweb/consoleql/typecreator.go b/satellite/console/consoleweb/consoleql/typecreator.go index 12d20493a..5bac956d8 100644 --- a/satellite/console/consoleweb/consoleql/typecreator.go +++ 
b/satellite/console/consoleweb/consoleql/typecreator.go @@ -21,6 +21,7 @@ type TypeCreator struct { creditUsage *graphql.Object project *graphql.Object projectUsage *graphql.Object + projectsPage *graphql.Object bucketUsage *graphql.Object bucketUsagePage *graphql.Object projectMember *graphql.Object @@ -31,6 +32,7 @@ type TypeCreator struct { userInput *graphql.InputObject projectInput *graphql.InputObject + projectsCursor *graphql.InputObject bucketUsageCursor *graphql.InputObject projectMembersCursor *graphql.InputObject apiKeysCursor *graphql.InputObject @@ -125,6 +127,16 @@ func (c *TypeCreator) Create(log *zap.Logger, service *console.Service, mailServ return err } + c.projectsCursor = graphqlProjectsCursor() + if err := c.projectsCursor.Error(); err != nil { + return err + } + + c.projectsPage = graphqlProjectsPage(c) + if err := c.projectsPage.Error(); err != nil { + return err + } + // root objects c.query = rootQuery(service, mailService, c) if err := c.query.Error(); err != nil { diff --git a/satellite/console/service.go b/satellite/console/service.go index 8bb4c5e36..326392deb 100644 --- a/satellite/console/service.go +++ b/satellite/console/service.go @@ -988,6 +988,22 @@ func (s *Service) GetUsersProjects(ctx context.Context) (ps []Project, err error return } +// GetUsersOwnedProjectsPage is a method for querying paged projects. +func (s *Service) GetUsersOwnedProjectsPage(ctx context.Context, cursor ProjectsCursor) (_ ProjectsPage, err error) { + defer mon.Task()(&ctx)(&err) + auth, err := s.getAuthAndAuditLog(ctx, "get user's owned projects page") + if err != nil { + return ProjectsPage{}, Error.Wrap(err) + } + + projects, err := s.store.Projects().ListByOwnerID(ctx, auth.User.ID, cursor) + if err != nil { + return ProjectsPage{}, Error.Wrap(err) + } + + return projects, nil +} + // GetCurrentRewardByType is a method for querying current active reward offer based on its type. 
func (s *Service) GetCurrentRewardByType(ctx context.Context, offerType rewards.OfferType) (offer *rewards.Offer, err error) { defer mon.Task()(&ctx)(&err) From 50bea3ab1a4dda135d31249b4140590d45b6d73d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Niewrza=C5=82?= Date: Mon, 25 Jan 2021 15:45:37 +0100 Subject: [PATCH 13/38] satellite/metainfo: adjust tests to changes in uplink One of uplink method changed its signature and we need to fix test on satellite side. Change-Id: Ib89ea6aa25c57bac11bc3e0e9c2c89a4b9debd7c --- go.mod | 4 ++-- go.sum | 11 ++++------- satellite/metainfo/metainfo_test.go | 20 ++++++++++---------- 3 files changed, 16 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index fe50f38f2..f63d5d38e 100644 --- a/go.mod +++ b/go.mod @@ -44,9 +44,9 @@ require ( golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e google.golang.org/api v0.20.0 // indirect google.golang.org/protobuf v1.25.0 // indirect - storj.io/common v0.0.0-20210113135631-07a5dc68dc1c + storj.io/common v0.0.0-20210119231202-8321551aa24d storj.io/drpc v0.0.16 storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b storj.io/private v0.0.0-20210108233641-2ba1ef686d1f - storj.io/uplink v1.4.5 + storj.io/uplink v1.4.6-0.20210125122828-3cf0f8cae40f ) diff --git a/go.sum b/go.sum index c8e0fadeb..c784905ef 100644 --- a/go.sum +++ b/go.sum @@ -68,8 +68,6 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= -github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded h1:WcPFZzCIqGt/TdFJHsOiX5dIlB/MUzrftltMhpjzfA8= -github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded/go.mod 
h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= @@ -916,9 +914,8 @@ sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2 sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ= -storj.io/common v0.0.0-20201207172416-78f4e59925c3/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY= -storj.io/common v0.0.0-20210113135631-07a5dc68dc1c h1:07A5QJMYYYQrOQv51j6RiOTstzMh7OnbqTZGZljp9/M= -storj.io/common v0.0.0-20210113135631-07a5dc68dc1c/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I= +storj.io/common v0.0.0-20210119231202-8321551aa24d h1:lOLCRtsKISuZlK2lBI5O0uBAc44mp/yO3CtUTXNNSUc= +storj.io/common v0.0.0-20210119231202-8321551aa24d/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I= storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw= storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA= storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ= @@ -927,5 +924,5 @@ storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BX storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80= storj.io/private v0.0.0-20210108233641-2ba1ef686d1f h1:ctEwD9AsWR8MGv+hKxATjsu114lOPuL2wL7fqO2qusg= storj.io/private v0.0.0-20210108233641-2ba1ef686d1f/go.mod h1:3KcGiA7phL3a0HUCe5ar90SlIU3iFb8hKInaEZQ5P7o= 
-storj.io/uplink v1.4.5 h1:aeJgbob2YtnVPgzrzw16XwmYr241ibuZBhPqVwvyR3E= -storj.io/uplink v1.4.5/go.mod h1:VoxYTP5AzJ+gnzsqptuB5Ra8Old+fVVbwLCmi4jr5y4= +storj.io/uplink v1.4.6-0.20210125122828-3cf0f8cae40f h1:Ed8NG6WvameV55uzYgDsE4U3D1y3/xSWHXqnpeS1BDc= +storj.io/uplink v1.4.6-0.20210125122828-3cf0f8cae40f/go.mod h1:6a95Ux48DWIhFDaNo3fV3ehyfD9lX//fGK9JiIdFbXo= diff --git a/satellite/metainfo/metainfo_test.go b/satellite/metainfo/metainfo_test.go index 91efed572..de213efbb 100644 --- a/satellite/metainfo/metainfo_test.go +++ b/satellite/metainfo/metainfo_test.go @@ -179,7 +179,7 @@ func TestRevokeMacaroon(t *testing.T) { err = client.CommitObject(ctx, metainfo.CommitObjectParams{StreamID: encodedStreamID}) assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied)) - _, _, _, err = client.BeginSegment(ctx, metainfo.BeginSegmentParams{StreamID: encodedStreamID}) + _, err = client.BeginSegment(ctx, metainfo.BeginSegmentParams{StreamID: encodedStreamID}) assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied)) err = client.MakeInlineSegment(ctx, metainfo.MakeInlineSegmentParams{StreamID: encodedStreamID}) @@ -270,7 +270,7 @@ func TestInvalidAPIKey(t *testing.T) { err = client.CommitObject(ctx, metainfo.CommitObjectParams{StreamID: streamID}) assertInvalidArgument(t, err, false) - _, _, _, err = client.BeginSegment(ctx, metainfo.BeginSegmentParams{StreamID: streamID}) + _, err = client.BeginSegment(ctx, metainfo.BeginSegmentParams{StreamID: streamID}) assertInvalidArgument(t, err, false) err = client.MakeInlineSegment(ctx, metainfo.MakeInlineSegmentParams{StreamID: streamID}) @@ -639,7 +639,7 @@ func TestBeginCommit(t *testing.T) { beginObjectResponse, err := metainfoClient.BeginObject(ctx, params) require.NoError(t, err) - segmentID, limits, _, err := metainfoClient.BeginSegment(ctx, metainfo.BeginSegmentParams{ + response, err := metainfoClient.BeginSegment(ctx, metainfo.BeginSegmentParams{ StreamID: beginObjectResponse.StreamID, Position: storj.SegmentPosition{ 
Index: 0, @@ -654,9 +654,9 @@ func TestBeginCommit(t *testing.T) { } makeResult := func(num int32) *pb.SegmentPieceUploadResult { - nodeID := limits[num].Limit.StorageNodeId + nodeID := response.Limits[num].Limit.StorageNodeId hash := &pb.PieceHash{ - PieceId: limits[num].Limit.PieceId, + PieceId: response.Limits[num].Limit.PieceId, PieceSize: 1048832, Timestamp: time.Now(), } @@ -674,7 +674,7 @@ func TestBeginCommit(t *testing.T) { } } err = metainfoClient.CommitSegment(ctx, metainfo.CommitSegmentParams{ - SegmentID: segmentID, + SegmentID: response.SegmentID, SizeEncryptedData: memory.MiB.Int64(), UploadResult: []*pb.SegmentPieceUploadResult{ @@ -1518,7 +1518,7 @@ func TestCommitObjectMetadataSize(t *testing.T) { beginObjectResponse, err := metainfoClient.BeginObject(ctx, params) require.NoError(t, err) - segmentID, limits, _, err := metainfoClient.BeginSegment(ctx, metainfo.BeginSegmentParams{ + response, err := metainfoClient.BeginSegment(ctx, metainfo.BeginSegmentParams{ StreamID: beginObjectResponse.StreamID, Position: storj.SegmentPosition{ Index: 0, @@ -1533,9 +1533,9 @@ func TestCommitObjectMetadataSize(t *testing.T) { } makeResult := func(num int32) *pb.SegmentPieceUploadResult { - nodeID := limits[num].Limit.StorageNodeId + nodeID := response.Limits[num].Limit.StorageNodeId hash := &pb.PieceHash{ - PieceId: limits[num].Limit.PieceId, + PieceId: response.Limits[num].Limit.PieceId, PieceSize: 1048832, Timestamp: time.Now(), } @@ -1553,7 +1553,7 @@ func TestCommitObjectMetadataSize(t *testing.T) { } } err = metainfoClient.CommitSegment(ctx, metainfo.CommitSegmentParams{ - SegmentID: segmentID, + SegmentID: response.SegmentID, SizeEncryptedData: memory.MiB.Int64(), UploadResult: []*pb.SegmentPieceUploadResult{ From 3fc0d2a83ef6be3e6dc874494b10c1d0c20adc81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Niewrza=C5=82?= Date: Mon, 25 Jan 2021 16:34:50 +0100 Subject: [PATCH 14/38] satellite/metainfo: add testing method from multipart-upload branch We 
want to have single uplink branch for standard and multipart-upload
satellite but some tests are using helper methods from multipart. This
change adds methods used by uplink test.

Change-Id: I82352ed56674ff7e8743b58061ba594018e78e3b
---
 private/testplanet/satellite.go       |  2 +
 satellite/metainfo/config.go          | 80 +++++++++++++++++++++++++++
 satellite/metainfo/metabase/common.go | 20 +++++++
 3 files changed, 102 insertions(+)

diff --git a/private/testplanet/satellite.go b/private/testplanet/satellite.go
index 3ed9fca4b..edb07f816 100644
--- a/private/testplanet/satellite.go
+++ b/private/testplanet/satellite.go
@@ -104,6 +104,7 @@ type Satellite struct {
 		Service   *metainfo.Service
 		Endpoint2 *metainfo.Endpoint
 		Loop      *metainfo.Loop
+		Metabase  *metainfo.PointerDBMetabase
 	}
 
 	Inspector struct {
@@ -709,6 +710,7 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
 	system.Metainfo.Service = peer.Metainfo.Service
 	system.Metainfo.Endpoint2 = api.Metainfo.Endpoint2
 	system.Metainfo.Loop = peer.Metainfo.Loop
+	system.Metainfo.Metabase = metainfo.NewPointerDBMetabase(system.Metainfo.Service)
 
 	system.Inspector.Endpoint = api.Inspector.Endpoint
 
diff --git a/satellite/metainfo/config.go b/satellite/metainfo/config.go
index 1f068687f..a597e4d38 100644
--- a/satellite/metainfo/config.go
+++ b/satellite/metainfo/config.go
@@ -13,12 +13,17 @@ import (
 	"go.uber.org/zap"
 
 	"storj.io/common/memory"
+	"storj.io/common/pb"
+	"storj.io/common/storj"
+	"storj.io/common/uuid"
 	"storj.io/storj/private/dbutil"
+	"storj.io/storj/satellite/metainfo/metabase"
 	"storj.io/storj/satellite/metainfo/objectdeletion"
 	"storj.io/storj/satellite/metainfo/piecedeletion"
 	"storj.io/storj/storage"
 	"storj.io/storj/storage/cockroachkv"
 	"storj.io/storj/storage/postgreskv"
+	"storj.io/uplink/private/storage/meta"
 )
 
 const (
@@ -162,3 +167,78 @@ func OpenStore(ctx context.Context, logger *zap.Logger, dbURLString string, app
 	logger.Debug("Connected to:", zap.String("db source", source))
 	return db, nil
 }
+ +// PointerDBMetabase this is wrapper struct that translates pointerDB to metabase. +// Use only for testing purposes. +type PointerDBMetabase struct { + metainfo *Service +} + +// NewPointerDBMetabase creates new NewPointerDBMetabase instance. +func NewPointerDBMetabase(service *Service) *PointerDBMetabase { + return &PointerDBMetabase{ + metainfo: service, + } +} + +// TestingAllCommittedObjects gets all committed objects from bucket. Use only for testing purposes. +func (m *PointerDBMetabase) TestingAllCommittedObjects(ctx context.Context, projectID uuid.UUID, bucketName string) (objects []metabase.ObjectEntry, err error) { + location, err := CreatePath(ctx, projectID, -1, []byte(bucketName), []byte{}) + if err != nil { + return nil, err + } + items, _, err := m.metainfo.List(ctx, location.Encode(), "", true, -1, meta.All) + if err != nil { + return nil, err + } + entries := make([]metabase.ObjectEntry, len(items)) + for i, item := range items { + entries[i] = metabase.ObjectEntry{ + ObjectKey: metabase.ObjectKey(item.Path), + } + } + return entries, nil +} + +// TestingAllObjectSegments gets all segments for given object. Use only for testing purposes. 
+func (m *PointerDBMetabase) TestingAllObjectSegments(ctx context.Context, objectLocation metabase.ObjectLocation) (segments []metabase.Segment, err error) { + location, err := CreatePath(ctx, objectLocation.ProjectID, -1, []byte(objectLocation.BucketName), []byte(objectLocation.ObjectKey)) + if err != nil { + return nil, err + } + + pointer, err := m.metainfo.Get(ctx, location.Encode()) + if err != nil { + return nil, err + } + + streamMeta := &pb.StreamMeta{} + err = pb.Unmarshal(pointer.Metadata, streamMeta) + if err != nil { + return nil, err + } + + segments = make([]metabase.Segment, 0) + for i := int64(0); i < streamMeta.NumberOfSegments-1; i++ { + location.Index = i + _, err = m.metainfo.Get(ctx, location.Encode()) + if err != nil { + if storj.ErrObjectNotFound.Has(err) { + continue + } + return nil, err + } + segments = append(segments, metabase.Segment{ + Position: metabase.SegmentPosition{ + Index: uint32(i), + }, + }) + } + + segments = append(segments, metabase.Segment{ + Position: metabase.SegmentPosition{ + Index: uint32(streamMeta.NumberOfSegments - 1), + }, + }) + return segments, nil +} diff --git a/satellite/metainfo/metabase/common.go b/satellite/metainfo/metabase/common.go index 84409c579..c72a02bdf 100644 --- a/satellite/metainfo/metabase/common.go +++ b/satellite/metainfo/metabase/common.go @@ -215,3 +215,23 @@ type Piece struct { Number uint16 StorageNode storj.NodeID } + +// ObjectEntry contains information about an object in a bucket. +type ObjectEntry struct { + ObjectKey ObjectKey + + // TODO copy more fields from metabase multipart-upload package if needed +} + +// SegmentPosition is segment part and index combined. +type SegmentPosition struct { + Part uint32 + Index uint32 +} + +// Segment segment metadata. 
+type Segment struct { + Position SegmentPosition + + // TODO copy more fields from metabase multipart-upload package if needed +} From 0b2568d71229581ae4e979b78cae98c90f4ab65c Mon Sep 17 00:00:00 2001 From: littleskunk Date: Tue, 26 Jan 2021 11:24:39 +0100 Subject: [PATCH 15/38] satellite/overlay/straynodes: increase development duration without contact Stopping storj-sim for over 5 minutes caused nodes to be disqualified. Set development max duration without contact to 300 days. --- satellite/overlay/straynodes/chore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/satellite/overlay/straynodes/chore.go b/satellite/overlay/straynodes/chore.go index fb8c39e33..6120e8c19 100644 --- a/satellite/overlay/straynodes/chore.go +++ b/satellite/overlay/straynodes/chore.go @@ -20,7 +20,7 @@ var mon = monkit.Package() type Config struct { EnableDQ bool `help:"whether nodes will be disqualified if they have not been contacted in some time" releaseDefault:"false" devDefault:"true"` Interval time.Duration `help:"how often to check for and DQ stray nodes" releaseDefault:"168h" devDefault:"5m"` - MaxDurationWithoutContact time.Duration `help:"length of time a node can go without contacting satellite before being disqualified" releaseDefault:"720h" devDefault:"5m"` + MaxDurationWithoutContact time.Duration `help:"length of time a node can go without contacting satellite before being disqualified" releaseDefault:"720h" devDefault:"7200h"` } // Chore disqualifies stray nodes. From c92bda7e75b79057d28066053436530956040e46 Mon Sep 17 00:00:00 2001 From: Isaac Hess Date: Mon, 25 Jan 2021 13:07:24 -0700 Subject: [PATCH 16/38] tally monkit: change location to monitor piecesize When we observed the value for total piecesizes stored in the network, we were doing it after converting them to byte-hours, rather than using the actual piece sizes. This fixes that issue. 
Change-Id: I1564d21b519f70eb59f298d97dbd777baf127723 --- monkit.lock | 2 +- satellite/accounting/tally/tally.go | 7 +++++-- satellite/satellitedb/storagenodeaccounting.go | 3 --- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/monkit.lock b/monkit.lock index 6bfad7daf..2da0b2118 100644 --- a/monkit.lock +++ b/monkit.lock @@ -6,6 +6,7 @@ storj.io/storj/satellite/accounting/tally."bucket_objects" IntVal storj.io/storj/satellite/accounting/tally."bucket_remote_bytes" IntVal storj.io/storj/satellite/accounting/tally."bucket_remote_segments" IntVal storj.io/storj/satellite/accounting/tally."bucket_segments" IntVal +storj.io/storj/satellite/accounting/tally."nodetallies.totalsum" IntVal storj.io/storj/satellite/accounting/tally."total_bytes" IntVal storj.io/storj/satellite/accounting/tally."total_inline_bytes" IntVal storj.io/storj/satellite/accounting/tally."total_inline_segments" IntVal @@ -112,7 +113,6 @@ storj.io/storj/satellite/satellitedb."audit_online_score" FloatVal storj.io/storj/satellite/satellitedb."audit_reputation_alpha" FloatVal storj.io/storj/satellite/satellitedb."audit_reputation_beta" FloatVal storj.io/storj/satellite/satellitedb."bad_audit_dqs" Meter -storj.io/storj/satellite/satellitedb."nodetallies.totalsum" IntVal storj.io/storj/satellite/satellitedb."offline_dqs" Meter storj.io/storj/satellite/satellitedb."unknown_audit_reputation_alpha" FloatVal storj.io/storj/satellite/satellitedb."unknown_audit_reputation_beta" FloatVal diff --git a/satellite/accounting/tally/tally.go b/satellite/accounting/tally/tally.go index d7ad57bc6..6f92ab3e5 100644 --- a/satellite/accounting/tally/tally.go +++ b/satellite/accounting/tally/tally.go @@ -184,9 +184,12 @@ func (service *Service) Tally(ctx context.Context) (err error) { // calculate byte hours, not just bytes hours := time.Since(lastTime).Hours() - for id := range observer.Node { - observer.Node[id] *= hours + var totalSum float64 + for id, pieceSize := range observer.Node { + totalSum += 
pieceSize + observer.Node[id] = pieceSize * hours } + mon.IntVal("nodetallies.totalsum").Observe(int64(totalSum)) //mon:locked // save the new results var errAtRest, errBucketInfo error diff --git a/satellite/satellitedb/storagenodeaccounting.go b/satellite/satellitedb/storagenodeaccounting.go index 0fbdc4e4d..b201bc0a1 100644 --- a/satellite/satellitedb/storagenodeaccounting.go +++ b/satellite/satellitedb/storagenodeaccounting.go @@ -32,13 +32,10 @@ func (db *StoragenodeAccounting) SaveTallies(ctx context.Context, latestTally ti } var nodeIDs []storj.NodeID var totals []float64 - var totalSum float64 for id, total := range nodeData { nodeIDs = append(nodeIDs, id) totals = append(totals, total) - totalSum += total } - mon.IntVal("nodetallies.totalsum").Observe(int64(totalSum)) //mon:locked err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error { _, err = tx.Tx.ExecContext(ctx, db.db.Rebind(` From 24d60384c55beb2503e68c9fd8d013b76e6a8ab0 Mon Sep 17 00:00:00 2001 From: Malcolm Bouzi Date: Tue, 26 Jan 2021 11:38:53 -0500 Subject: [PATCH 17/38] satellite/satellitedb: add columns for professional users (#4028) Co-authored-by: Egon Elbre --- satellite/satellitedb/dbx/satellitedb.dbx | 9 +- satellite/satellitedb/dbx/satellitedb.dbx.go | 292 ++++++++- .../satellitedb/dbx/satellitedb.dbx.pgx.sql | 5 + .../dbx/satellitedb.dbx.pgxcockroach.sql | 5 + satellite/satellitedb/migrate.go | 12 + .../satellitedb/testdata/postgres.v139.sql | 2 +- .../satellitedb/testdata/postgres.v140.sql | 576 ++++++++++++++++++ 7 files changed, 874 insertions(+), 27 deletions(-) create mode 100644 satellite/satellitedb/testdata/postgres.v140.sql diff --git a/satellite/satellitedb/dbx/satellitedb.dbx b/satellite/satellitedb/dbx/satellitedb.dbx index 3bea2c71f..dd9051add 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx +++ b/satellite/satellitedb/dbx/satellitedb.dbx @@ -128,7 +128,7 @@ model node ( index ( name nodes_dis_unk_exit_fin_last_success_index fields disqualified 
unknown_audit_suspended exit_finished_at last_contact_success - ) + ) field id blob // address is how to contact the node, this can be a hostname or IP and it contains the port @@ -290,6 +290,13 @@ model user ( field partner_id blob ( nullable ) field created_at timestamp ( autoinsert ) field project_limit int ( updatable, default 0 ) + + field position text ( updatable, nullable ) + field company_name text ( updatable, nullable ) + field company_size int ( updatable, nullable ) + field working_on text ( updatable, nullable ) + field is_professional bool ( updatable, default false ) + ) create user ( ) diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index 3dfb8d467..ab2054ac7 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -684,6 +684,11 @@ CREATE TABLE users ( partner_id bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, PRIMARY KEY ( id ) ); CREATE TABLE value_attributions ( @@ -1236,6 +1241,11 @@ CREATE TABLE users ( partner_id bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, PRIMARY KEY ( id ) ); CREATE TABLE value_attributions ( @@ -7217,14 +7227,24 @@ type User struct { PartnerId []byte CreatedAt time.Time ProjectLimit int + Position *string + CompanyName *string + CompanySize *int + WorkingOn *string + IsProfessional bool } func (User) _Table() string { return "users" } type User_Create_Fields struct { - ShortName User_ShortName_Field - PartnerId User_PartnerId_Field - ProjectLimit User_ProjectLimit_Field + ShortName User_ShortName_Field + PartnerId User_PartnerId_Field + ProjectLimit 
User_ProjectLimit_Field + Position User_Position_Field + CompanyName User_CompanyName_Field + CompanySize User_CompanySize_Field + WorkingOn User_WorkingOn_Field + IsProfessional User_IsProfessional_Field } type User_Update_Fields struct { @@ -7235,6 +7255,11 @@ type User_Update_Fields struct { PasswordHash User_PasswordHash_Field Status User_Status_Field ProjectLimit User_ProjectLimit_Field + Position User_Position_Field + CompanyName User_CompanyName_Field + CompanySize User_CompanySize_Field + WorkingOn User_WorkingOn_Field + IsProfessional User_IsProfessional_Field } type User_Id_Field struct { @@ -7453,6 +7478,153 @@ func (f User_ProjectLimit_Field) value() interface{} { func (User_ProjectLimit_Field) _Column() string { return "project_limit" } +type User_Position_Field struct { + _set bool + _null bool + _value *string +} + +func User_Position(v string) User_Position_Field { + return User_Position_Field{_set: true, _value: &v} +} + +func User_Position_Raw(v *string) User_Position_Field { + if v == nil { + return User_Position_Null() + } + return User_Position(*v) +} + +func User_Position_Null() User_Position_Field { + return User_Position_Field{_set: true, _null: true} +} + +func (f User_Position_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f User_Position_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_Position_Field) _Column() string { return "position" } + +type User_CompanyName_Field struct { + _set bool + _null bool + _value *string +} + +func User_CompanyName(v string) User_CompanyName_Field { + return User_CompanyName_Field{_set: true, _value: &v} +} + +func User_CompanyName_Raw(v *string) User_CompanyName_Field { + if v == nil { + return User_CompanyName_Null() + } + return User_CompanyName(*v) +} + +func User_CompanyName_Null() User_CompanyName_Field { + return User_CompanyName_Field{_set: true, _null: true} +} + +func (f User_CompanyName_Field) isnull() bool 
{ return !f._set || f._null || f._value == nil } + +func (f User_CompanyName_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_CompanyName_Field) _Column() string { return "company_name" } + +type User_CompanySize_Field struct { + _set bool + _null bool + _value *int +} + +func User_CompanySize(v int) User_CompanySize_Field { + return User_CompanySize_Field{_set: true, _value: &v} +} + +func User_CompanySize_Raw(v *int) User_CompanySize_Field { + if v == nil { + return User_CompanySize_Null() + } + return User_CompanySize(*v) +} + +func User_CompanySize_Null() User_CompanySize_Field { + return User_CompanySize_Field{_set: true, _null: true} +} + +func (f User_CompanySize_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f User_CompanySize_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_CompanySize_Field) _Column() string { return "company_size" } + +type User_WorkingOn_Field struct { + _set bool + _null bool + _value *string +} + +func User_WorkingOn(v string) User_WorkingOn_Field { + return User_WorkingOn_Field{_set: true, _value: &v} +} + +func User_WorkingOn_Raw(v *string) User_WorkingOn_Field { + if v == nil { + return User_WorkingOn_Null() + } + return User_WorkingOn(*v) +} + +func User_WorkingOn_Null() User_WorkingOn_Field { + return User_WorkingOn_Field{_set: true, _null: true} +} + +func (f User_WorkingOn_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f User_WorkingOn_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_WorkingOn_Field) _Column() string { return "working_on" } + +type User_IsProfessional_Field struct { + _set bool + _null bool + _value bool +} + +func User_IsProfessional(v bool) User_IsProfessional_Field { + return User_IsProfessional_Field{_set: true, _value: v} +} + +func (f User_IsProfessional_Field) value() 
interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (User_IsProfessional_Field) _Column() string { return "is_professional" } + type ValueAttribution struct { ProjectId []byte BucketName []byte @@ -9357,15 +9529,19 @@ func (obj *pgxImpl) Create_User(ctx context.Context, __status_val := int(0) __partner_id_val := optional.PartnerId.value() __created_at_val := __now + __position_val := optional.Position.value() + __company_name_val := optional.CompanyName.value() + __company_size_val := optional.CompanySize.value() + __working_on_val := optional.WorkingOn.value() - var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at")} - var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?")} + var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at, position, company_name, company_size, working_on")} + var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, 
users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}} var __values []interface{} - __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) + __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val) __optional_columns := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "} @@ -9376,6 +9552,12 @@ func (obj *pgxImpl) Create_User(ctx context.Context, __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) } + if optional.IsProfessional._set { + __values = append(__values, optional.IsProfessional.value()) + __optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("is_professional")) + __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) + } + if len(__optional_columns.SQLs) == 0 { if __columns.SQL == nil { __clause.SQL = __sqlbundle_Literal("DEFAULT VALUES") @@ -9388,7 +9570,7 @@ func (obj *pgxImpl) Create_User(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err != nil { return nil, obj.makeErr(err) } @@ -10921,7 +11103,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.normalized_email = ? 
AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -10945,7 +11127,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err != nil { return nil, err } @@ -10979,7 +11161,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context, user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -10988,7 +11170,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err != nil { return (*User)(nil), obj.makeErr(err) } @@ -13937,7 +14119,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? 
RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -13978,6 +14160,31 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_limit = ?")) } + if update.Position._set { + __values = append(__values, update.Position.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("position = ?")) + } + + if update.CompanyName._set { + __values = append(__values, update.CompanyName.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_name = ?")) + } + + if update.CompanySize._set { + __values = append(__values, update.CompanySize.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_size = ?")) + } + + if update.WorkingOn._set { + __values = append(__values, update.WorkingOn.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("working_on = ?")) + } + + if update.IsProfessional._set { + __values = append(__values, update.IsProfessional.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("is_professional = ?")) + } + if len(__sets_sql.SQLs) == 0 { return nil, emptyUpdate() } @@ -13991,7 +14198,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err == sql.ErrNoRows { return nil, nil } @@ -16118,15 +16325,19 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, __status_val := int(0) __partner_id_val := optional.PartnerId.value() __created_at_val := __now + __position_val := optional.Position.value() + __company_name_val := optional.CompanyName.value() + __company_size_val := optional.CompanySize.value() + __working_on_val := optional.WorkingOn.value() - var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at")} - var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?")} + var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at, position, company_name, company_size, working_on")} + var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, 
users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}} var __values []interface{} - __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val) + __values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val) __optional_columns := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "} @@ -16137,6 +16348,12 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) } + if optional.IsProfessional._set { + __values = append(__values, optional.IsProfessional.value()) + __optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("is_professional")) + __optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?")) + } + if len(__optional_columns.SQLs) == 0 { if __columns.SQL == nil { __clause.SQL = __sqlbundle_Literal("DEFAULT VALUES") @@ -16149,7 +16366,7 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err != nil { return nil, obj.makeErr(err) } @@ -17682,7 +17899,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.normalized_email = ? 
AND users.status != 0 LIMIT 2") var __values []interface{} __values = append(__values, user_normalized_email.value()) @@ -17706,7 +17923,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c } user = &User{} - err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err != nil { return nil, err } @@ -17740,7 +17957,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context, user *User, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.id = ?") var __values []interface{} __values = append(__values, user_id.value()) @@ -17749,7 +17966,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err != nil { return (*User)(nil), obj.makeErr(err) } @@ -20698,7 +20915,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? 
RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -20739,6 +20956,31 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_limit = ?")) } + if update.Position._set { + __values = append(__values, update.Position.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("position = ?")) + } + + if update.CompanyName._set { + __values = append(__values, update.CompanyName.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_name = ?")) + } + + if update.CompanySize._set { + __values = append(__values, update.CompanySize.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_size = ?")) + } + + if update.WorkingOn._set { + __values = append(__values, update.WorkingOn.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("working_on = ?")) + } + + if update.IsProfessional._set { + __values = append(__values, update.IsProfessional.value()) + __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("is_professional = ?")) + } + if len(__sets_sql.SQLs) == 0 { return nil, emptyUpdate() } @@ -20752,7 +20994,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context, obj.logStmt(__stmt, __values...) 
user = &User{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional) if err == sql.ErrNoRows { return nil, nil } diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql index 59f08f824..bcf7ff18d 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql @@ -364,6 +364,11 @@ CREATE TABLE users ( partner_id bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, PRIMARY KEY ( id ) ); CREATE TABLE value_attributions ( diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql index 59f08f824..bcf7ff18d 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql @@ -364,6 +364,11 @@ CREATE TABLE users ( partner_id bytea, created_at timestamp with time zone NOT NULL, project_limit integer NOT NULL DEFAULT 0, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, PRIMARY KEY ( id ) ); CREATE TABLE value_attributions ( diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index 1ee02d958..741a2bd80 100644 --- a/satellite/satellitedb/migrate.go +++ 
b/satellite/satellitedb/migrate.go @@ -1176,6 +1176,18 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration { `ALTER TABLE nodes ALTER COLUMN total_uptime_count SET DEFAULT 0;`, }, }, + { + DB: &db.migrationDB, + Description: "add columns for professional users", + Version: 140, + Action: migrate.SQL{ + `ALTER TABLE users ADD COLUMN position text;`, + `ALTER TABLE users ADD COLUMN company_name text;`, + `ALTER TABLE users ADD COLUMN working_on text;`, + `ALTER TABLE users ADD COLUMN company_size int;`, + `ALTER TABLE users ADD COLUMN is_professional boolean NOT NULL DEFAULT false;`, + }, + }, }, } } diff --git a/satellite/satellitedb/testdata/postgres.v139.sql b/satellite/satellitedb/testdata/postgres.v139.sql index 3addaaec4..ab3d8efff 100644 --- a/satellite/satellitedb/testdata/postgres.v139.sql +++ b/satellite/satellitedb/testdata/postgres.v139.sql @@ -568,4 +568,4 @@ INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_ -- NEW DATA -- -INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1); \ No newline at end of file +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", 
"patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1); diff --git a/satellite/satellitedb/testdata/postgres.v140.sql b/satellite/satellitedb/testdata/postgres.v140.sql new file mode 100644 index 000000000..51f4b9726 --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v140.sql @@ -0,0 +1,576 @@ +-- AUTOGENERATED BY storj.io/dbx +-- DO NOT EDIT +CREATE TABLE accounting_rollups ( + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + PRIMARY KEY ( node_id, start_time ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE TABLE audit_histories ( + node_id bytea NOT NULL, + history bytea NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE 
bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + inline bigint NOT NULL, + remote bigint NOT NULL, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE coinpayments_transactions ( + id text NOT NULL, + user_id bytea NOT NULL, + address text NOT NULL, + amount bytea NOT NULL, + received bytea NOT NULL, + status integer NOT NULL, + key text NOT NULL, + timeout integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE consumed_serials ( + storage_node_id bytea NOT NULL, + serial_number bytea NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( storage_node_id, serial_number ) +); +CREATE TABLE coupons ( + id bytea NOT NULL, + user_id bytea NOT NULL, + amount bigint NOT NULL, + description text NOT NULL, + type integer NOT NULL, + status integer NOT NULL, + duration bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE coupon_usages ( + coupon_id bytea NOT NULL, + amount bigint NOT NULL, + status integer NOT NULL, + period timestamp with time zone NOT NULL, + PRIMARY KEY ( coupon_id, period ) +); +CREATE TABLE graceful_exit_progress ( + node_id bytea NOT NULL, + bytes_transferred bigint NOT NULL, + pieces_transferred bigint NOT NULL DEFAULT 0, + pieces_failed bigint NOT NULL DEFAULT 0, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE graceful_exit_transfer_queue ( + node_id bytea NOT NULL, + path bytea NOT NULL, + piece_num integer NOT NULL, + root_piece_id bytea, + durability_ratio double precision NOT NULL, + queued_at timestamp with time zone NOT NULL, + requested_at timestamp with time zone, + last_failed_at timestamp with time zone, + 
last_failed_code integer, + failed_count integer, + finished_at timestamp with time zone, + order_limit_send_count integer NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, path, piece_num ) +); +CREATE TABLE injuredsegments ( + path bytea NOT NULL, + data bytea NOT NULL, + attempted timestamp with time zone, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + segment_health double precision NOT NULL DEFAULT 1, + PRIMARY KEY ( path ) +); +CREATE TABLE irreparabledbs ( + segmentpath bytea NOT NULL, + segmentdetail bytea NOT NULL, + pieces_lost_count bigint NOT NULL, + seg_damaged_unix_sec bigint NOT NULL, + repair_attempt_count bigint NOT NULL, + PRIMARY KEY ( segmentpath ) +); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL DEFAULT '', + last_net text NOT NULL, + last_ip_port text, + protocol integer NOT NULL DEFAULT 0, + type integer NOT NULL DEFAULT 0, + email text NOT NULL, + wallet text NOT NULL, + free_disk bigint NOT NULL DEFAULT -1, + piece_count bigint NOT NULL DEFAULT 0, + major bigint NOT NULL DEFAULT 0, + minor bigint NOT NULL DEFAULT 0, + patch bigint NOT NULL DEFAULT 0, + hash text NOT NULL DEFAULT '', + timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00', + release boolean NOT NULL DEFAULT false, + latency_90 bigint NOT NULL DEFAULT 0, + audit_success_count bigint NOT NULL DEFAULT 0, + total_audit_count bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + uptime_success_count bigint NOT NULL DEFAULT 0, + total_uptime_count bigint NOT NULL DEFAULT 0, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch', + last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch', + contained boolean NOT NULL DEFAULT false, + disqualified timestamp with time zone, + suspended timestamp with time zone, + 
unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + online_score double precision NOT NULL DEFAULT 1, + audit_reputation_alpha double precision NOT NULL DEFAULT 1, + audit_reputation_beta double precision NOT NULL DEFAULT 0, + unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1, + unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0, + uptime_reputation_alpha double precision NOT NULL DEFAULT 1, + uptime_reputation_beta double precision NOT NULL DEFAULT 0, + exit_initiated_at timestamp with time zone, + exit_loop_completed_at timestamp with time zone, + exit_finished_at timestamp with time zone, + exit_success boolean NOT NULL DEFAULT false, + PRIMARY KEY ( id ) +); +CREATE TABLE node_api_versions ( + id bytea NOT NULL, + api_version integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE offers ( + id serial NOT NULL, + name text NOT NULL, + description text NOT NULL, + award_credit_in_cents integer NOT NULL DEFAULT 0, + invitee_credit_in_cents integer NOT NULL DEFAULT 0, + award_credit_duration_days integer, + invitee_credit_duration_days integer, + redeemable_cap integer, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + status integer NOT NULL, + type integer NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE peer_identities ( + node_id bytea NOT NULL, + leaf_serial_number bytea NOT NULL, + chain bytea NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + path bytea NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE pending_serial_queue ( + storage_node_id 
bytea NOT NULL, + bucket_id bytea NOT NULL, + serial_number bytea NOT NULL, + action integer NOT NULL, + settled bigint NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( storage_node_id, bucket_id, serial_number ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + name text NOT NULL, + description text NOT NULL, + usage_limit bigint, + bandwidth_limit bigint, + rate_limit integer, + max_buckets integer, + partner_id bytea, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE project_bandwidth_rollups ( + project_id bytea NOT NULL, + interval_month date NOT NULL, + egress_allocated bigint NOT NULL, + PRIMARY KEY ( project_id, interval_month ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE reported_serials ( + expires_at timestamp with time zone NOT NULL, + storage_node_id bytea NOT NULL, + bucket_id bytea NOT NULL, + action integer NOT NULL, + serial_number bytea NOT NULL, + settled bigint NOT NULL, + observed_at timestamp with time zone NOT NULL, + PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE revocations ( + revoked bytea NOT NULL, + api_key_id bytea NOT NULL, + PRIMARY KEY ( revoked ) +); +CREATE TABLE serial_numbers ( + id serial NOT NULL, + serial_number bytea NOT NULL, + bucket_id bytea NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, 
+ allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollups_phase2 ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_payments ( + id bigserial NOT NULL, + created_at timestamp with time zone NOT NULL, + node_id bytea NOT NULL, + period text NOT NULL, + amount bigint NOT NULL, + receipt text, + notes text, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_paystubs ( + period text NOT NULL, + node_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + codes text NOT NULL, + usage_at_rest double precision NOT NULL, + usage_get bigint NOT NULL, + usage_put bigint NOT NULL, + usage_get_repair bigint NOT NULL, + usage_put_repair bigint NOT NULL, + usage_get_audit bigint NOT NULL, + comp_at_rest bigint NOT NULL, + comp_get bigint NOT NULL, + comp_put bigint NOT NULL, + comp_get_repair bigint NOT NULL, + comp_put_repair bigint NOT NULL, + comp_get_audit bigint NOT NULL, + surge_percent bigint NOT NULL, + held bigint NOT NULL, + owed bigint NOT NULL, + disposed bigint NOT NULL, + paid bigint NOT NULL, + PRIMARY KEY ( period, node_id ) +); +CREATE TABLE storagenode_storage_tallies ( + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( interval_end_time, node_id ) +); +CREATE TABLE stripe_customers ( + user_id bytea NOT NULL, + customer_id text NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ), + UNIQUE ( customer_id ) +); +CREATE TABLE stripecoinpayments_invoice_project_records ( + id bytea NOT NULL, + project_id bytea NOT NULL, + storage double precision NOT NULL, + egress bigint NOT NULL, + objects 
bigint NOT NULL, + period_start timestamp with time zone NOT NULL, + period_end timestamp with time zone NOT NULL, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( project_id, period_start, period_end ) +); +CREATE TABLE stripecoinpayments_tx_conversion_rates ( + tx_id text NOT NULL, + rate bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + email text NOT NULL, + normalized_email text NOT NULL, + full_name text NOT NULL, + short_name text, + password_hash bytea NOT NULL, + status integer NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + project_limit integer NOT NULL DEFAULT 0, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, + PRIMARY KEY ( id ) +); +CREATE TABLE value_attributions ( + project_id bytea NOT NULL, + bucket_name bytea NOT NULL, + partner_id bytea NOT NULL, + last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, bucket_name ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + head bytea NOT NULL, + name text NOT NULL, + secret bytea NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( head ), + UNIQUE ( name, project_id ) +); +CREATE TABLE bucket_metainfos ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ), + name bytea NOT NULL, + partner_id bytea, + path_cipher integer NOT NULL, + created_at timestamp with time zone NOT NULL, + default_segment_size integer NOT NULL, + default_encryption_cipher_suite integer NOT NULL, + default_encryption_block_size integer NOT NULL, + default_redundancy_algorithm integer NOT NULL, + default_redundancy_share_size integer NOT NULL, + default_redundancy_required_shares integer NOT NULL, + 
default_redundancy_repair_shares integer NOT NULL, + default_redundancy_optimal_shares integer NOT NULL, + default_redundancy_total_shares integer NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( name, project_id ), + UNIQUE ( project_id, name ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE stripecoinpayments_apply_balance_intents ( + tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE used_serials ( + serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE, + storage_node_id bytea NOT NULL, + PRIMARY KEY ( serial_number_id, storage_node_id ) +); +CREATE TABLE user_credits ( + id serial NOT NULL, + user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + offer_id integer NOT NULL REFERENCES offers( id ), + referred_by bytea REFERENCES users( id ) ON DELETE SET NULL, + type text NOT NULL, + credits_earned_in_cents integer NOT NULL, + credits_used_in_cents integer NOT NULL, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( id, offer_id ) +); +CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time ); +CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start ); +CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id ); +CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id ); +CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at ); 
+CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at ); +CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted ); +CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health ); +CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at ); +CREATE INDEX node_last_ip ON nodes ( last_net ); +CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success ); +CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number ); +CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at ); +CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start ); +CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period ); +CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id ); +CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id ); +CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id ); + +INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", 
"free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", 
"uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", 
"audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", 
"uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00'); + +INSERT INTO 
"projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00'); + +INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00'); +INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", 
"action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); + +INSERT INTO 
"reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14); +INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14); + +INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, 
E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00'); + +INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10); + +INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null'); + +INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", 
"pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00'); +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, 
'2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO 
"coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024); + +INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00'); + +INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00'); + +INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES 
(E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00'); + +INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00'); + +INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00'); + +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', 
'\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00'); + +INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00'); + +INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES 
(E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High 
Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL); + +INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); + +-- NEW DATA -- +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true); From 135d846affba7d4aabe80b66f55a07a773520a24 Mon Sep 17 00:00:00 2001 From: crawter Date: Thu, 21 Jan 2021 18:52:19 
+0200 Subject: [PATCH 18/38] mnd/console/server: endpoint with index.html added Change-Id: Ic5154feaa995bf5c26c851024079f7e82612f306 --- multinode/console/controllers/nodes.go | 27 +++------------ multinode/console/server/server.go | 47 ++++++++++++++++++++++++-- multinode/nodes/nodes.go | 39 ++++++++++----------- multinode/nodes/service.go | 25 +++++++------- 4 files changed, 83 insertions(+), 55 deletions(-) diff --git a/multinode/console/controllers/nodes.go b/multinode/console/controllers/nodes.go index 9d761a76c..98da7bd31 100644 --- a/multinode/console/controllers/nodes.go +++ b/multinode/console/controllers/nodes.go @@ -133,32 +133,15 @@ func (controller *Nodes) Get(w http.ResponseWriter, r *http.Request) { node, err := controller.service.Get(ctx, nodeID) if err != nil { controller.log.Error("get node not found error", zap.Error(err)) - controller.serveError(w, http.StatusNotFound, ErrNodes.Wrap(err)) - return - } - - if err = json.NewEncoder(w).Encode(node); err != nil { - controller.log.Error("failed to write json response", zap.Error(err)) - return - } -} - -// List handles retrieving list of nodes. 
-func (controller *Nodes) List(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer mon.Task()(&ctx)(&err) - - w.Header().Add("Content-Type", "application/json") - - list, err := controller.service.List(ctx) - if err != nil { - controller.log.Error("list nodes internal error", zap.Error(err)) + if nodes.ErrNoNode.Has(err) { + controller.serveError(w, http.StatusNotFound, ErrNodes.Wrap(err)) + return + } controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err)) return } - if err = json.NewEncoder(w).Encode(list); err != nil { + if err = json.NewEncoder(w).Encode(node); err != nil { controller.log.Error("failed to write json response", zap.Error(err)) return } diff --git a/multinode/console/server/server.go b/multinode/console/server/server.go index 9a0408f11..a4dc857a6 100644 --- a/multinode/console/server/server.go +++ b/multinode/console/server/server.go @@ -5,8 +5,10 @@ package server import ( "context" + "html/template" "net" "net/http" + "path/filepath" "github.com/gorilla/mux" "github.com/zeebo/errs" @@ -39,6 +41,8 @@ type Server struct { listener net.Listener http http.Server + + index *template.Template } // NewServer returns new instance of Multinode Dashboard http server. 
@@ -51,13 +55,14 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne } router := mux.NewRouter() + fs := http.FileServer(http.Dir(server.config.StaticDir)) + apiRouter := router.PathPrefix("/api/v0").Subrouter() apiRouter.NotFoundHandler = controllers.NewNotFound(server.log) nodesController := controllers.NewNodes(server.log, server.nodes) nodesRouter := apiRouter.PathPrefix("/nodes").Subrouter() nodesRouter.HandleFunc("", nodesController.Add).Methods(http.MethodPost) - nodesRouter.HandleFunc("", nodesController.List).Methods(http.MethodGet) nodesRouter.HandleFunc("/infos", nodesController.ListInfos).Methods(http.MethodGet) nodesRouter.HandleFunc("/infos/{satelliteID}", nodesController.ListInfosSatellite).Methods(http.MethodGet) nodesRouter.HandleFunc("/trusted-satellites", nodesController.TrustedSatellites).Methods(http.MethodGet) @@ -65,6 +70,11 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne nodesRouter.HandleFunc("/{id}", nodesController.UpdateName).Methods(http.MethodPatch) nodesRouter.HandleFunc("/{id}", nodesController.Delete).Methods(http.MethodDelete) + if server.config.StaticDir != "" { + router.PathPrefix("/static/").Handler(http.StripPrefix("/static", fs)) + router.PathPrefix("/").HandlerFunc(server.appHandler) + } + server.http = http.Server{ Handler: router, } @@ -72,10 +82,33 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne return &server, nil } +// appHandler is web app http handler function. 
+func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) { + header := w.Header() + + header.Set("Content-Type", "text/html; charset=UTF-8") + header.Set("X-Content-Type-Options", "nosniff") + header.Set("Referrer-Policy", "same-origin") + + if server.index == nil { + server.log.Error("index template is not set") + return + } + + if err := server.index.Execute(w, nil); err != nil { + server.log.Error("index template could not be executed", zap.Error(Error.Wrap(err))) + return + } +} + // Run starts the server that host webapp and api endpoints. func (server *Server) Run(ctx context.Context) (err error) { - ctx, cancel := context.WithCancel(ctx) + err = server.initializeTemplates() + if err != nil { + return Error.Wrap(err) + } + ctx, cancel := context.WithCancel(ctx) var group errgroup.Group group.Go(func() error { @@ -94,3 +127,13 @@ func (server *Server) Run(ctx context.Context) (err error) { func (server *Server) Close() error { return Error.Wrap(server.http.Close()) } + +// initializeTemplates is used to initialize all templates. +func (server *Server) initializeTemplates() (err error) { + server.index, err = template.ParseFiles(filepath.Join(server.config.StaticDir, "dist", "index.html")) + if err != nil { + server.log.Error("dist folder is not generated. use 'npm run build' command", zap.Error(err)) + } + + return err +} diff --git a/multinode/nodes/nodes.go b/multinode/nodes/nodes.go index 4ac86d907..2589b7241 100644 --- a/multinode/nodes/nodes.go +++ b/multinode/nodes/nodes.go @@ -33,32 +33,33 @@ var ErrNoNode = errs.Class("no such node") // Node is a representation of storagenode, that SNO could add to the Multinode Dashboard. type Node struct { - ID storj.NodeID + ID storj.NodeID `json:"id"` // APISecret is a secret issued by storagenode, that will be main auth mechanism in MND <-> SNO api. 
- APISecret []byte - PublicAddress string - Name string + APISecret []byte `json:"apiSecret"` + PublicAddress string `json:"publicAddress"` + Name string `json:"name"` } // NodeInfo contains basic node internal state. type NodeInfo struct { - ID storj.NodeID - Name string - Version string - LastContact time.Time - DiskSpaceUsed int64 - DiskSpaceLeft int64 - BandwidthUsed int64 - TotalEarned int64 + ID storj.NodeID `json:"id"` + Name string `json:"name"` + Version string `json:"version"` + LastContact time.Time `json:"lastContact"` + DiskSpaceUsed int64 `json:"diskSpaceUsed"` + DiskSpaceLeft int64 `json:"diskSpaceLeft"` + BandwidthUsed int64 `json:"bandwidthUsed"` + TotalEarned int64 `json:"totalEarned"` } // NodeInfoSatellite contains satellite specific node internal state. type NodeInfoSatellite struct { - ID storj.NodeID - Name string - Version string - LastContact time.Time - OnlineScore float64 - AuditScore float64 - SuspensionScore float64 + ID storj.NodeID `json:"id"` + Name string `json:"name"` + Version string `json:"version"` + LastContact time.Time `json:"lastContact"` + OnlineScore float64 `json:"onlineScore"` + AuditScore float64 `json:"auditScore"` + SuspensionScore float64 `json:"suspensionScore"` + TotalEarned int64 `json:"totalEarned"` } diff --git a/multinode/nodes/service.go b/multinode/nodes/service.go index fcf64866b..abbf6673c 100644 --- a/multinode/nodes/service.go +++ b/multinode/nodes/service.go @@ -66,18 +66,6 @@ func (service *Service) Get(ctx context.Context, id storj.NodeID) (_ Node, err e } -// List retrieves list of all added nodes. -func (service *Service) List(ctx context.Context) (_ []Node, err error) { - defer mon.Task()(&ctx)(&err) - - nodes, err := service.nodes.List(ctx) - if err != nil { - return nil, Error.Wrap(err) - } - - return nodes, nil -} - // Remove removes node from the system. 
func (service *Service) Remove(ctx context.Context, id storj.NodeID) (err error) { defer mon.Task()(&ctx)(&err) @@ -90,6 +78,9 @@ func (service *Service) ListInfos(ctx context.Context) (_ []NodeInfo, err error) nodes, err := service.nodes.List(ctx) if err != nil { + if ErrNoNode.Has(err) { + return []NodeInfo{}, nil + } return nil, Error.Wrap(err) } @@ -172,6 +163,9 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor nodes, err := service.nodes.List(ctx) if err != nil { + if ErrNoNode.Has(err) { + return []NodeInfoSatellite{}, nil + } return nil, Error.Wrap(err) } @@ -191,6 +185,7 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor }() nodeClient := multinodepb.NewDRPCNodeClient(conn) + payoutClient := multinodepb.NewDRPCPayoutClient(conn) header := &multinodepb.RequestHeader{ ApiKey: node.APISecret, @@ -214,6 +209,11 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor return NodeInfoSatellite{}, Error.Wrap(err) } + earned, err := payoutClient.Earned(ctx, &multinodepb.EarnedRequest{Header: header}) + if err != nil { + return NodeInfoSatellite{}, Error.Wrap(err) + } + return NodeInfoSatellite{ ID: node.ID, Name: node.Name, @@ -222,6 +222,7 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor OnlineScore: rep.Online.Score, AuditScore: rep.Audit.Score, SuspensionScore: rep.Audit.SuspensionScore, + TotalEarned: earned.Total, }, nil }() if err != nil { From 9820145e14848fdefbd2ee6992ebf5e88611048f Mon Sep 17 00:00:00 2001 From: crawter Date: Thu, 21 Jan 2021 19:04:42 +0200 Subject: [PATCH 19/38] web/mnd: nodes domain, api and store Change-Id: I022c5153dfc85a25eebce6e8ba91b97e906736cb --- web/multinode/package.json | 4 +- web/multinode/src/api/index.ts | 29 ++++ web/multinode/src/api/nodes.ts | 206 +++++++++++++++++++++++++++ web/multinode/src/app/store/index.ts | 16 ++- web/multinode/src/app/store/nodes.ts | 75 ++++++++-- 
web/multinode/src/nodes/index.ts | 66 ++++++--- web/multinode/src/nodes/service.ts | 112 +++++++++++++++ 7 files changed, 473 insertions(+), 35 deletions(-) create mode 100644 web/multinode/src/api/index.ts create mode 100644 web/multinode/src/api/nodes.ts create mode 100644 web/multinode/src/nodes/service.ts diff --git a/web/multinode/package.json b/web/multinode/package.json index de4e9ce4d..e7d5d082c 100644 --- a/web/multinode/package.json +++ b/web/multinode/package.json @@ -5,7 +5,7 @@ "serve": "vue-cli-service serve", "lint": "vue-cli-service lint && stylelint '**/*.{vue,scss}' --fix", "build": "vue-cli-service build", - "debug": "vue-cli-service build --mode development", + "dev": "vue-cli-service build --mode development", "test": "vue-cli-service test:unit" }, "dependencies": { @@ -23,7 +23,7 @@ "@vue/cli-service": "4.5.9", "babel-core": "6.26.3", "core-js": "3.8.1", - "node-sass": "4.14.1", + "sass": "^1.32.0", "sass-loader": "8.0.0", "stylelint": "13.8.0", "stylelint-config-standard": "20.0.0", diff --git a/web/multinode/src/api/index.ts b/web/multinode/src/api/index.ts new file mode 100644 index 000000000..1ca96e81d --- /dev/null +++ b/web/multinode/src/api/index.ts @@ -0,0 +1,29 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. + +/** + * ErrorUnauthorized is a custom error type for performing unauthorized operations. + */ +export class UnauthorizedError extends Error { + public constructor(message: string = 'authorization required') { + super(message); + } +} + +/** + * BadRequestError is a custom error type for performing bad request. + */ +export class BadRequestError extends Error { + public constructor(message: string = 'bad request') { + super(message); + } +} + +/** + * InternalError is a custom error type for internal server error. 
+ */ +export class InternalError extends Error { + public constructor(message: string = 'internal server error') { + super(message); + } +} diff --git a/web/multinode/src/api/nodes.ts b/web/multinode/src/api/nodes.ts new file mode 100644 index 000000000..3778b2c3c --- /dev/null +++ b/web/multinode/src/api/nodes.ts @@ -0,0 +1,206 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. + +import { BadRequestError, InternalError, UnauthorizedError } from '@/api/index'; +import { CreateNodeFields, Node, NodeURL } from '@/nodes'; +import { HttpClient } from '@/private/http/client'; + +/** + * client for nodes controller of MND api. + */ +export class NodesClient { + private readonly http: HttpClient = new HttpClient(); + private readonly ROOT_PATH: string = '/api/v0/nodes'; + + /** + * handles node addition. + * + * @param node - node to add. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async add(node: CreateNodeFields): Promise { + const path = `${this.ROOT_PATH}`; + const response = await this.http.post(path, JSON.stringify(node)); + + if (!response.ok) { + await this.handleError(response); + } + } + + /** + * returns list of node infos. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. 
+ */ + public async list(): Promise { + const path = `${this.ROOT_PATH}/infos`; + const response = await this.http.get(path); + + if (!response.ok) { + await this.handleError(response); + } + + const nodeListJson = await response.json(); + + return nodeListJson.map(node => new Node( + node.id, + node.name, + node.version, + new Date(node.lastContact), + node.diskSpaceUsed, + node.diskSpaceLeft, + node.bandwidthUsed, + 0, + 0, + 0, + node.totalEarned, + )); + } + + /** + * returns list of node infos by satellite. + * + * @param satelliteId - id of the satellite. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async listBySatellite(satelliteId: string): Promise { + const path = `${this.ROOT_PATH}/infos/${satelliteId}`; + const response = await this.http.get(path); + + if (!response.ok) { + await this.handleError(response); + } + + const nodeListJson = await response.json(); + + return nodeListJson.map(node => new Node( + node.id, + node.name, + node.version, + new Date(node.lastContact), + 0, + 0, + 0, + node.onlineScore, + node.auditScore, + node.suspensionScore, + node.totalEarned, + )); + } + + /** + * updates nodes name. + * + * @param id - id of the node. + * @param name - new node name. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. 
+ */ + public async updateName(id: string, name: string): Promise { + const path = `${this.ROOT_PATH}/${id}`; + const response = await this.http.patch(path, JSON.stringify({name: name})); + + if (!response.ok) { + await this.handleError(response); + } + } + + /** + * deletes node. + * + * @param id - id of the node. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async delete(id: string): Promise { + const path = `${this.ROOT_PATH}/${id}`; + const response = await this.http.delete(path); + + if (!response.ok) { + await this.handleError(response); + } + } + + /** + * retrieves list of trusted satellites node urls for a node. + */ + public async trustedSatellites(): Promise { + const path = `${this.ROOT_PATH}/trusted-satellites`; + const response = await this.http.get(path); + + if (!response.ok) { + await this.handleError(response); + } + + const urlListJson = await response.json(); + + return urlListJson.map(url => new NodeURL( + url.ID, + url.Name, + )); + } + + /** + * handles error due to response code. + * @param response - response from server. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid ISBN number. + * + * @throws {@link UnauthorizedError} + * Thrown if the ISBN number is valid, but no such book exists in the catalog. + * + * @throws {@link InternalError} + * Thrown if the ISBN number is valid, but no such book exists in the catalog. 
+ * + * @private + */ + private async handleError(response: Response): Promise { + const body = await response.json(); + + switch (response.status) { + case 401: throw new UnauthorizedError(body.error); + case 400: throw new BadRequestError(body.error); + case 500: + default: + throw new InternalError(body.error); + } + } +} diff --git a/web/multinode/src/app/store/index.ts b/web/multinode/src/app/store/index.ts index c96315e4e..3ad974ab5 100644 --- a/web/multinode/src/app/store/index.ts +++ b/web/multinode/src/app/store/index.ts @@ -4,9 +4,11 @@ import Vue from 'vue'; import Vuex, { ModuleTree, Store, StoreOptions } from 'vuex'; +import { NodesClient } from '@/api/nodes'; import { NodesModule, NodesState } from '@/app/store/nodes'; +import { Nodes } from '@/nodes/service'; -Vue.use(Vuex); // TODO: place to main.ts when initialization of everything will be there. +Vue.use(Vuex); /** * RootState is a representation of global state. @@ -18,7 +20,7 @@ export class RootState { /** * MultinodeStoreOptions contains all needed data for store creation. 
*/ -class MultinodeStoreOptions implements StoreOptions { +export class MultinodeStoreOptions implements StoreOptions { public readonly strict: boolean; public readonly state: RootState; public readonly modules: ModuleTree; @@ -34,6 +36,14 @@ class MultinodeStoreOptions implements StoreOptions { } } +// Services +const nodesClient: NodesClient = new NodesClient(); +const nodesService: Nodes = new Nodes(nodesClient); + +// Modules +const nodesModule: NodesModule = new NodesModule(nodesService); + +// Store export const store: Store = new Vuex.Store( - new MultinodeStoreOptions(new NodesModule()), + new MultinodeStoreOptions(nodesModule), ); diff --git a/web/multinode/src/app/store/nodes.ts b/web/multinode/src/app/store/nodes.ts index c74bafa15..a8d51a1e1 100644 --- a/web/multinode/src/app/store/nodes.ts +++ b/web/multinode/src/app/store/nodes.ts @@ -4,13 +4,16 @@ import { ActionContext, ActionTree, GetterTree, Module, MutationTree } from 'vuex'; import { RootState } from '@/app/store/index'; -import { Node } from '@/nodes'; +import { CreateNodeFields, Node, NodeURL } from '@/nodes'; +import { Nodes } from '@/nodes/service'; /** * NodesState is a representation of nodes module state. */ export class NodesState { public nodes: Node[] = []; + public selectedSatellite: NodeURL | null = null; + public trustedSatellites: NodeURL[] = []; } /** @@ -23,17 +26,23 @@ export class NodesModule implements Module { public readonly actions: ActionTree; public readonly mutations: MutationTree; - public constructor() { // here should be services, apis, 3d party dependencies. 
+ private readonly nodes: Nodes; + + public constructor(nodes: Nodes) { + this.nodes = nodes; + this.namespaced = true; - this.state = new NodesState(); - this.mutations = { populate: this.populate, + saveTrustedSatellites: this.saveTrustedSatellites, + setSelectedSatellite: this.setSelectedSatellite, }; - this.actions = { - fetch: this.fetch, + fetch: this.fetch.bind(this), + add: this.add.bind(this), + trustedSatellites: this.trustedSatellites.bind(this), + selectSatellite: this.selectSatellite.bind(this), }; } @@ -47,10 +56,60 @@ export class NodesModule implements Module { } /** - * fetch action loads all nodes. + * saveTrustedSatellites mutation will save new list of trusted satellites to store. + * @param state + * @param trustedSatellites + */ + public saveTrustedSatellites(state: NodesState, trustedSatellites: NodeURL[]) { + state.trustedSatellites = trustedSatellites; + } + + /** + * setSelectedSatellite mutation will selected satellite to store. + * @param state + * @param satelliteId - id of the satellite to select. + */ + public setSelectedSatellite(state: NodesState, satelliteId: string) { + state.selectedSatellite = state.trustedSatellites.find((satellite: NodeURL) => satellite.id === satelliteId) || null; + } + + /** + * fetch action loads all nodes information. * @param ctx - context of the Vuex action. */ public async fetch(ctx: ActionContext): Promise { - await new Promise(() => null); + const nodes = ctx.state.selectedSatellite ? await this.nodes.listBySatellite(ctx.state.selectedSatellite.id) : await this.nodes.list(); + ctx.commit('populate', nodes); + } + + /** + * Adds node to multinode list. + * @param ctx - context of the Vuex action. + * @param node - to add. + */ + public async add(ctx: ActionContext, node: CreateNodeFields): Promise { + await this.nodes.add(node); + await this.fetch(ctx); + } + + /** + * retrieves list of trusted satellites node urls for a node. + * @param ctx - context of the Vuex action. 
+ */ + public async trustedSatellites(ctx: ActionContext): Promise { + const satellites: NodeURL[] = await this.nodes.trustedSatellites(); + + ctx.commit('saveTrustedSatellites', satellites); + } + + /** + * save satellite as selected satellite. + * @param ctx - context of the Vuex action. + * @param satelliteId - satellite id to select. + */ + public async selectSatellite(ctx: ActionContext, satelliteId: string): Promise { + ctx.commit('setSelectedSatellite', satelliteId); + + await this.fetch(ctx); } } diff --git a/web/multinode/src/nodes/index.ts b/web/multinode/src/nodes/index.ts index b2bc70b05..f6fa101ed 100644 --- a/web/multinode/src/nodes/index.ts +++ b/web/multinode/src/nodes/index.ts @@ -1,19 +1,6 @@ // Copyright (C) 2020 Storj Labs, Inc. // See LICENSE for copying information. -/** - * NodeToAdd is a representation of storagenode, that SNO could add to the Multinode Dashboard. - */ -export class NodeToAdd { - public id: string; // TODO: create ts analog of storj.NodeID; - /** - * apiSecret is a secret issued by storagenode, that will be main auth mechanism in MND <-> SNO api. - */ - public apiSecret: string; // TODO: change to Uint8Array[]; - public publicAddress: string; - public name: string; -} - /** * Describes node online statuses. */ @@ -22,19 +9,54 @@ export enum NodeStatus { Offline = 'offline', } -// TODO: refactor this /** - * Node holds all information of node for the Multinode Dashboard. + * NodeInfo contains basic node internal state. 
*/ export class Node { + public status: NodeStatus = NodeStatus.Offline; + private readonly STATUS_TRESHHOLD_MILISECONDS: number = 10.8e6; + + public constructor( + public id: string, + public name: string, + public version: string, + public lastContact: Date, + public diskSpaceUsed: number, + public diskSpaceLeft: number, + public bandwidthUsed: number, + public onlineScore: number, + public auditScore: number, + public suspensionScore: number, + public earned: number, + ) { + const now = new Date(); + if (now.getTime() - this.lastContact.getTime() < this.STATUS_TRESHHOLD_MILISECONDS) { + this.status = NodeStatus.Online; + } + } + + public get displayedName(): string { + return this.name || this.id; + } +} + +/** + * CreateNodeFields is a representation of storagenode, that SNO could add to the Multinode Dashboard. + */ +export class CreateNodeFields { public constructor( public id: string = '', - public name: string = '', - public diskSpaceUsed: number = 0, - public diskSpaceLeft: number = 0, - public bandwidthUsed: number = 0, - public earned: number = 0, - public version: string = '', - public status: NodeStatus = NodeStatus.Offline, + public apiSecret: string = '', + public publicAddress: string = '', + ) {} +} + +/** + * NodeURL defines a structure for connecting to a node. + */ +export class NodeURL { + public constructor( + public id: string, + public address: string, ) {} } diff --git a/web/multinode/src/nodes/service.ts b/web/multinode/src/nodes/service.ts new file mode 100644 index 000000000..0661d5d84 --- /dev/null +++ b/web/multinode/src/nodes/service.ts @@ -0,0 +1,112 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +import { NodesClient } from '@/api/nodes'; +import { CreateNodeFields, Node, NodeURL } from '@/nodes/index'; + +/** + * exposes all nodes related logic + */ +export class Nodes { + private readonly nodes: NodesClient; + + public constructor(nodes: NodesClient) { + this.nodes = nodes; + } + + /** + * handles node addition. + * + * @param node - node to add. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async add(node: CreateNodeFields): Promise { + await this.nodes.add(node); + } + + /** + * returns list of node infos. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async list(): Promise { + return await this.nodes.list(); + } + + /** + * returns list of node infos by satellite. + * + * @param satelliteId - id of the satellite. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async listBySatellite(satelliteId: string): Promise { + return await this.nodes.listBySatellite(satelliteId); + } + + /** + * updates nodes name. + * + * @param id - id of the node. + * @param name - new node name. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. 
+ * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async updateName(id: string, name: string): Promise { + await this.nodes.updateName(id, name); + } + + /** + * deletes node. + * + * @param id - id of the node. + * + * @throws {@link BadRequestError} + * This exception is thrown if the input is not a valid. + * + * @throws {@link UnauthorizedError} + * Thrown if the auth cookie is missing or invalid. + * + * @throws {@link InternalError} + * Thrown if something goes wrong on server side. + */ + public async delete(id: string): Promise { + await this.nodes.delete(id); + } + + /** + * retrieves list of trusted satellites node urls for a node. + */ + public async trustedSatellites(): Promise { + return await this.nodes.trustedSatellites(); + } +} From 93e841361760f6abafea792eb1297056f1890a95 Mon Sep 17 00:00:00 2001 From: crawter Date: Thu, 21 Jan 2021 19:07:09 +0200 Subject: [PATCH 20/38] web/mnd: my nodes page ui Change-Id: I96403a6ae6ce232eee08208895e03c6a7aed74cb --- web/multinode/package-lock.json | 1132 +++-------------- .../app/components/common/HeaderedInput.vue | 16 +- .../app/components/common/HeaderlessInput.vue | 13 + .../src/app/components/common/VDropdown.vue | 52 +- .../src/app/components/modals/AddNewNode.vue | 99 +- .../src/app/components/tables/NodesTable.vue | 36 +- web/multinode/src/app/utils/currency.ts | 1 + web/multinode/src/app/utils/percentage.ts | 16 + web/multinode/src/app/views/MyNodes.vue | 52 +- web/multinode/src/main.ts | 37 +- web/multinode/src/private/http/client.ts | 73 ++ .../src/{app/utils => private/memory}/size.ts | 0 web/multinode/vue.config.js | 2 +- 13 files changed, 561 insertions(+), 968 deletions(-) create mode 100644 web/multinode/src/app/utils/percentage.ts create mode 100644 web/multinode/src/private/http/client.ts rename web/multinode/src/{app/utils => private/memory}/size.ts (100%) diff --git a/web/multinode/package-lock.json b/web/multinode/package-lock.json index 
bbd656611..c8ceac7fa 100644 --- a/web/multinode/package-lock.json +++ b/web/multinode/package-lock.json @@ -1791,105 +1791,6 @@ "yorkie": "^2.0.0" }, "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "optional": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "optional": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, - "optional": true - }, - "fork-ts-checker-webpack-plugin-v5": { - "version": "npm:fork-ts-checker-webpack-plugin@5.2.1", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-5.2.1.tgz", - "integrity": "sha512-SVi+ZAQOGbtAsUWrZvGzz38ga2YqjWvca1pXQFUArIVXqli0lLoDQ8uS0wg0kSpcwpZmaW5jVCZXQebkyUQSsw==", - "dev": true, - "optional": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" - }, - "dependencies": { - "chalk": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - 
"dev": true, - "optional": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "schema-utils": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", - "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", - "dev": true, - "optional": true, - "requires": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - } - }, - "semver": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz", - "integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==", - "dev": true, - "optional": true, - "requires": { - "lru-cache": "^6.0.0" - } - } - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "optional": true - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "optional": true, - "requires": { - "yallist": "^4.0.0" - } - }, "schema-utils": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", @@ -1901,16 +1802,6 @@ "ajv-keywords": "^3.1.0" } }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "optional": true, - "requires": { - "has-flag": "^4.0.0" - } - }, "tslint": { "version": "5.20.1", "resolved": "https://registry.npmjs.org/tslint/-/tslint-5.20.1.tgz", @@ -1962,13 
+1853,6 @@ "watchpack": "^1.7.4", "webpack-sources": "^1.4.1" } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "optional": true } } }, @@ -2048,16 +1932,6 @@ "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", "dev": true }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "optional": true, - "requires": { - "color-convert": "^2.0.1" - } - }, "cacache": { "version": "13.0.1", "resolved": "https://registry.npmjs.org/cacache/-/cacache-13.0.1.tgz", @@ -2084,34 +1958,6 @@ "unique-filename": "^1.1.1" } }, - "chalk": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - "dev": true, - "optional": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "optional": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, - "optional": true - }, "find-up": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", 
@@ -2249,32 +2095,6 @@ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", "dev": true }, - "vue-loader-v16": { - "version": "npm:vue-loader@16.1.2", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-16.1.2.tgz", - "integrity": "sha512-8QTxh+Fd+HB6fiL52iEVLKqE9N1JSlMXLR92Ijm6g8PZrwIxckgpqjPDWRP5TWxdiPaHR+alUWsnu1ShQOwt+Q==", - "dev": true, - "optional": true, - "requires": { - "chalk": "^4.1.0", - "hash-sum": "^2.0.0", - "loader-utils": "^2.0.0" - }, - "dependencies": { - "loader-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz", - "integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==", - "dev": true, - "optional": true, - "requires": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - } - } - } - }, "webpack": { "version": "4.44.2", "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.44.2.tgz", @@ -2661,12 +2481,6 @@ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", "dev": true }, - "abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true - }, "accepts": { "version": "1.3.7", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", @@ -2735,12 +2549,6 @@ "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=", "dev": true }, - "amdefine": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", - "integrity": "sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=", - "dev": true - }, "ansi-align": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-2.0.0.tgz", @@ -2838,16 +2646,6 @@ "integrity": 
"sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", "dev": true }, - "are-we-there-yet": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz", - "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==", - "dev": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" - } - }, "argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", @@ -2875,12 +2673,6 @@ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", "dev": true }, - "array-find-index": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", - "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", - "dev": true - }, "array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", @@ -3003,12 +2795,6 @@ "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==", "dev": true }, - "async-foreach": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/async-foreach/-/async-foreach-0.1.3.tgz", - "integrity": "sha1-NhIfhFwFeBct5Bmpfb6x0W7DRUI=", - "dev": true - }, "async-limiter": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", @@ -3497,15 +3283,6 @@ "file-uri-to-path": "1.0.0" } }, - "block-stream": { - "version": "0.0.9", - "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", - "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", - "dev": true, - "requires": { - "inherits": "~2.0.0" - } - }, "bluebird": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", @@ -4342,12 +4119,6 @@ "q": "^1.1.2" } }, - "code-point-at": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true - }, "collection-visit": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", @@ -4545,12 +4316,6 @@ "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", "dev": true }, - "console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", - "dev": true - }, "consolidate": { "version": "0.15.1", "resolved": "https://registry.npmjs.org/consolidate/-/consolidate-0.15.1.tgz", @@ -5168,15 +4933,6 @@ } } }, - "currently-unhandled": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", - "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", - "dev": true, - "requires": { - "array-find-index": "^1.0.1" - } - }, "cyclist": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz", @@ -5488,12 +5244,6 @@ "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", "dev": true }, - "delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", - "dev": true - }, "depd": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", @@ -6467,6 +6217,122 @@ "worker-rpc": "^0.1.0" } }, + "fork-ts-checker-webpack-plugin-v5": { + "version": "npm:fork-ts-checker-webpack-plugin@5.2.1", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-5.2.1.tgz", + "integrity": "sha512-SVi+ZAQOGbtAsUWrZvGzz38ga2YqjWvca1pXQFUArIVXqli0lLoDQ8uS0wg0kSpcwpZmaW5jVCZXQebkyUQSsw==", + "dev": true, + "optional": true, + "requires": { + "@babel/code-frame": 
"^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "optional": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "optional": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "optional": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "optional": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "optional": true + }, + "lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": 
"sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "optional": true, + "requires": { + "yallist": "^4.0.0" + } + }, + "schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dev": true, + "optional": true, + "requires": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + } + }, + "semver": { + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz", + "integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==", + "dev": true, + "optional": true, + "requires": { + "lru-cache": "^6.0.0" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "optional": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "optional": true + } + } + }, "form-data": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", @@ -6563,86 +6429,12 @@ "dev": true, "optional": true }, - "fstream": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz", - "integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "inherits": "~2.0.0", - "mkdirp": ">=0.5 0", - "rimraf": "2" - } - }, "function-bind": 
{ "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, - "gauge": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz", - "integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=", - "dev": true, - "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - } - } - }, - "gaze": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/gaze/-/gaze-1.1.3.tgz", - "integrity": "sha512-BRdNm8hbWzFzWHERTrejLqwHDfS4GibPoq5wjTPIoJHoBtKGPg3xAFfxmM+9ztbXelxcf2hwQcaz1PtmFeue8g==", - "dev": true, - "requires": { - "globule": "^1.0.0" - } - }, "gensync": { "version": "1.0.0-beta.2", 
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", @@ -6782,17 +6574,6 @@ "integrity": "sha1-L0SUrIkZ43Z8XLtpHp9GMyQoXUM=", "dev": true }, - "globule": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/globule/-/globule-1.3.2.tgz", - "integrity": "sha512-7IDTQTIu2xzXkT+6mlluidnWo+BypnbSoEVVQCGfzqnl5Ik8d3e1d4wycb8Rj9tWW+Z39uPWsdlquqiqPCd/pA==", - "dev": true, - "requires": { - "glob": "~7.1.1", - "lodash": "~4.17.10", - "minimatch": "~3.0.2" - } - }, "gonzales-pe": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/gonzales-pe/-/gonzales-pe-4.3.0.tgz", @@ -6911,12 +6692,6 @@ "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", "dev": true }, - "has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", - "dev": true - }, "has-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", @@ -7407,12 +7182,6 @@ "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", "dev": true }, - "in-publish": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/in-publish/-/in-publish-2.0.1.tgz", - "integrity": "sha512-oDM0kUSNFC31ShNxHKUyfZKy8ZeXZBWMjMdZHKLOk13uvT27VTL/QzRGfRUcevJhpkZAvlhPYuXkF7eNWrtyxQ==", - "dev": true - }, "indent-string": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", @@ -7881,12 +7650,6 @@ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", "dev": true }, - "is-utf8": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", - "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=", - "dev": true - }, "is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -7929,12 +7692,6 @@ "integrity": 
"sha512-yV+gqbd5vaOYjqlbk16EG89xB5udgjqQF3C5FAORDg4f/IS1Yc5ERCv5e/57yBcfJYw05V5JyIXabhwb75Xxow==", "dev": true }, - "js-base64": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz", - "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ==", - "dev": true - }, "js-message": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/js-message/-/js-message-1.0.7.tgz", @@ -8107,36 +7864,6 @@ "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", "dev": true }, - "load-json-file": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", - "integrity": "sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0", - "strip-bom": "^2.0.0" - }, - "dependencies": { - "parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", - "dev": true, - "requires": { - "error-ex": "^1.2.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - } - } - }, "loader-runner": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz", @@ -8246,16 +7973,6 @@ "js-tokens": "^3.0.0 || ^4.0.0" } }, - "loud-rejection": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", - "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=", - "dev": true, - "requires": { - "currently-unhandled": "^0.4.1", - "signal-exit": "^3.0.0" - } - }, "lower-case": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz", @@ -8830,7 +8547,8 @@ "version": "2.14.2", "resolved": 
"https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", - "dev": true + "dev": true, + "optional": true }, "nanomatch": { "version": "1.2.13", @@ -8884,34 +8602,6 @@ "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==", "dev": true }, - "node-gyp": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-3.8.0.tgz", - "integrity": "sha512-3g8lYefrRRzvGeSowdJKAKyks8oUpLEd/DyPV4eMhVlhJ0aNaZqIrNUIPuEWWTAoPqyFkfGrM67MC69baqn6vA==", - "dev": true, - "requires": { - "fstream": "^1.0.0", - "glob": "^7.0.3", - "graceful-fs": "^4.1.2", - "mkdirp": "^0.5.0", - "nopt": "2 || 3", - "npmlog": "0 || 1 || 2 || 3 || 4", - "osenv": "0", - "request": "^2.87.0", - "rimraf": "2", - "semver": "~5.3.0", - "tar": "^2.0.0", - "which": "1" - }, - "dependencies": { - "semver": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.3.0.tgz", - "integrity": "sha1-myzl094C0XxgEq0yaqa00M9U+U8=", - "dev": true - } - } - }, "node-ipc": { "version": "9.1.3", "resolved": "https://registry.npmjs.org/node-ipc/-/node-ipc-9.1.3.tgz", @@ -8968,245 +8658,6 @@ "integrity": "sha512-V5QF9noGFl3EymEwUYzO+3NTDpGfQB4ve6Qfnzf3UNydMhjQRVPR1DZTuvWiLzaFJYw2fmDwAfnRNEVb64hSIg==", "dev": true }, - "node-sass": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/node-sass/-/node-sass-4.14.1.tgz", - "integrity": "sha512-sjCuOlvGyCJS40R8BscF5vhVlQjNN069NtQ1gSxyK1u9iqvn6tf7O1R4GNowVZfiZUCRt5MmMs1xd+4V/7Yr0g==", - "dev": true, - "requires": { - "async-foreach": "^0.1.3", - "chalk": "^1.1.1", - "cross-spawn": "^3.0.0", - "gaze": "^1.0.0", - "get-stdin": "^4.0.1", - "glob": "^7.0.3", - "in-publish": "^2.0.0", - "lodash": "^4.17.15", - "meow": "^3.7.0", - "mkdirp": "^0.5.1", - "nan": "^2.13.2", - "node-gyp": "^3.8.0", - "npmlog": "^4.0.0", - "request": "^2.88.0", - "sass-graph": "2.2.5", - 
"stdout-stream": "^1.4.0", - "true-case-path": "^1.0.2" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true - }, - "camelcase": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", - "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", - "dev": true - }, - "camelcase-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-2.1.0.tgz", - "integrity": "sha1-MIvur/3ygRkFHvodkyITyRuPkuc=", - "dev": true, - "requires": { - "camelcase": "^2.0.0", - "map-obj": "^1.0.0" - } - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - } - }, - "cross-spawn": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-3.0.1.tgz", - "integrity": "sha1-ElYDfsufDF9549bvE14wdwGEuYI=", - "dev": true, - "requires": { - "lru-cache": "^4.0.1", - "which": "^1.2.9" - } - }, - "find-up": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", - "integrity": "sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8=", - "dev": true, - "requires": { - "path-exists": "^2.0.0", - "pinkie-promise": "^2.0.0" - } - }, - "get-stdin": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", - "integrity": "sha1-uWjGsKBDhDJJAui/Gl3zJXmkUP4=", - "dev": true - }, - "indent-string": { - "version": "2.1.0", - 
"resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", - "integrity": "sha1-ji1INIdCEhtKghi3oTfppSBJ3IA=", - "dev": true, - "requires": { - "repeating": "^2.0.0" - } - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "dev": true, - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "map-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", - "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", - "dev": true - }, - "meow": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/meow/-/meow-3.7.0.tgz", - "integrity": "sha1-cstmi0JSKCkKu/qFaJJYcwioAfs=", - "dev": true, - "requires": { - "camelcase-keys": "^2.0.0", - "decamelize": "^1.1.2", - "loud-rejection": "^1.0.0", - "map-obj": "^1.0.1", - "minimist": "^1.1.3", - "normalize-package-data": "^2.3.4", - "object-assign": "^4.0.1", - "read-pkg-up": "^1.0.1", - "redent": "^1.0.0", - "trim-newlines": "^1.0.0" - } - }, - "path-exists": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", - "integrity": "sha1-D+tsZPD8UY2adU3V77YscCJ2H0s=", - "dev": true, - "requires": { - "pinkie-promise": "^2.0.0" - } - }, - "path-type": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", - "integrity": "sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - }, - "read-pkg": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", - "integrity": 
"sha1-9f+qXs0pyzHAR0vKfXVra7KePyg=", - "dev": true, - "requires": { - "load-json-file": "^1.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^1.0.0" - } - }, - "read-pkg-up": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", - "integrity": "sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI=", - "dev": true, - "requires": { - "find-up": "^1.0.0", - "read-pkg": "^1.0.0" - } - }, - "redent": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz", - "integrity": "sha1-z5Fqsf1fHxbfsggi3W7H9zDCr94=", - "dev": true, - "requires": { - "indent-string": "^2.1.0", - "strip-indent": "^1.0.1" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "strip-indent": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-1.0.1.tgz", - "integrity": "sha1-DHlipq3vp7vUrDZkYKY4VSrhoKI=", - "dev": true, - "requires": { - "get-stdin": "^4.0.1" - } - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true - }, - "trim-newlines": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", - "integrity": "sha1-WIeWa7WCpFA6QetST301ARgVphM=", - "dev": true - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", - "dev": true - } - } - }, - "nopt": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", - "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", - "dev": true, - "requires": { - "abbrev": "1" - } - }, "normalize-package-data": { "version": "2.5.0", 
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", @@ -9252,18 +8703,6 @@ "path-key": "^2.0.0" } }, - "npmlog": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", - "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", - "dev": true, - "requires": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, "nth-check": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", @@ -9279,12 +8718,6 @@ "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=", "dev": true }, - "number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true - }, "oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -9518,16 +8951,6 @@ "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", "dev": true }, - "osenv": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz", - "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==", - "dev": true, - "requires": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.0" - } - }, "p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", @@ -11316,140 +10739,13 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "dev": true }, - "sass-graph": { - "version": "2.2.5", - "resolved": "https://registry.npmjs.org/sass-graph/-/sass-graph-2.2.5.tgz", - "integrity": "sha512-VFWDAHOe6mRuT4mZRd4eKE+d8Uedrk6Xnh7Sh9b4NGufQLQjOrvf/MQoOdx+0s92L89FeyUUNfU597j/3uNpag==", + "sass": { + "version": "1.32.0", + "resolved": 
"https://registry.npmjs.org/sass/-/sass-1.32.0.tgz", + "integrity": "sha512-fhyqEbMIycQA4blrz/C0pYhv2o4x2y6FYYAH0CshBw3DXh5D5wyERgxw0ptdau1orc/GhNrhF7DFN2etyOCEng==", "dev": true, "requires": { - "glob": "^7.0.0", - "lodash": "^4.0.0", - "scss-tokenizer": "^0.2.3", - "yargs": "^13.3.2" - }, - "dependencies": { - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - }, - "cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dev": true, - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", - "dev": true - }, - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - 
"p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - } - }, - "yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dev": true, - "requires": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - 
"yargs-parser": "^13.1.2" - } - }, - "yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - } + "chokidar": ">=2.0.0 <4.0.0" } }, "sass-loader": { @@ -11490,27 +10786,6 @@ "ajv-keywords": "^3.5.2" } }, - "scss-tokenizer": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/scss-tokenizer/-/scss-tokenizer-0.2.3.tgz", - "integrity": "sha1-jrBtualyMzOCTT9VMGQRSYR85dE=", - "dev": true, - "requires": { - "js-base64": "^2.1.8", - "source-map": "^0.4.2" - }, - "dependencies": { - "source-map": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.4.4.tgz", - "integrity": "sha1-66T12pwNyZneaAMti092FzZSA2s=", - "dev": true, - "requires": { - "amdefine": ">=0.0.4" - } - } - } - }, "select-hose": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", @@ -12208,15 +11483,6 @@ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", "dev": true }, - "stdout-stream": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/stdout-stream/-/stdout-stream-1.4.1.tgz", - "integrity": "sha512-j4emi03KXqJWcIeF8eIXkjMFN1Cmb8gUlDYGeBALLPo5qdyTfA9bOtl8m33lRoC+vFMkP3gl0WsDr6+gzxbbTA==", - "dev": true, - "requires": { - "readable-stream": "^2.0.1" - } - }, "stream-browserify": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", @@ -12319,15 +11585,6 @@ } } }, - "strip-bom": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", - "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=", - "dev": true, - "requires": { - "is-utf8": "^0.2.0" - } - }, "strip-eof": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", @@ -12815,17 +12072,6 @@ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", "dev": true }, - "tar": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.2.tgz", - "integrity": "sha512-FCEhQ/4rE1zYv9rYXJw/msRqsnmlje5jHP6huWeBZ704jUTy02c5AZyWujpMR1ax6mVw9NyJMfuK2CMDWVIfgA==", - "dev": true, - "requires": { - "block-stream": "*", - "fstream": "^1.0.12", - "inherits": "2" - } - }, "term-size": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/term-size/-/term-size-1.2.0.tgz", @@ -13164,15 +12410,6 @@ "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", "dev": true }, - "true-case-path": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/true-case-path/-/true-case-path-1.0.3.tgz", - "integrity": "sha512-m6s2OdQe5wgpFMC+pAJ+q9djG82O2jcHPOI6RNg1yy9rCYR+WD6Nbpl32fDpfC56nirdRy+opFa/Vk7HYhqaew==", - "dev": true, - "requires": { - "glob": "^7.1.2" - } - }, "tryer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/tryer/-/tryer-1.0.1.tgz", @@ -13874,6 +13111,87 @@ } } }, + "vue-loader-v16": { + "version": "npm:vue-loader@16.1.2", + "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-16.1.2.tgz", + "integrity": "sha512-8QTxh+Fd+HB6fiL52iEVLKqE9N1JSlMXLR92Ijm6g8PZrwIxckgpqjPDWRP5TWxdiPaHR+alUWsnu1ShQOwt+Q==", + "dev": true, + "optional": true, + "requires": { + "chalk": "^4.1.0", + "hash-sum": "^2.0.0", + "loader-utils": "^2.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "optional": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "optional": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "optional": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "optional": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "optional": true + }, + "loader-utils": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz", + "integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==", + "dev": true, + "optional": true, + "requires": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "optional": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, "vue-parser": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/vue-parser/-/vue-parser-1.1.6.tgz", @@ -14886,48 +14204,6 @@ "integrity": 
"sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", "dev": true }, - "wide-align": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", - "dev": true, - "requires": { - "string-width": "^1.0.2 || 2" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, "widest-line": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz", diff --git a/web/multinode/src/app/components/common/HeaderedInput.vue b/web/multinode/src/app/components/common/HeaderedInput.vue index e044c99df..fcff98f41 100644 --- a/web/multinode/src/app/components/common/HeaderedInput.vue +++ b/web/multinode/src/app/components/common/HeaderedInput.vue @@ -5,7 +5,7 @@
-
+
@@ -33,6 +33,8 @@ wrap="hard" @input="onInput" @change="onInput" + @paste.prevent="onPaste" + autocomplete="off" v-model="value">
@@ -99,13 +103,13 @@ export default class HeaderedInput extends HeaderlessInput { display: flex; justify-content: flex-start; align-items: center; + margin-bottom: 8px; &__label { font-family: 'font_regular', sans-serif; font-size: 16px; line-height: 21px; color: var(--c-gray); - margin-bottom: 8px; } &__error { @@ -114,6 +118,14 @@ export default class HeaderedInput extends HeaderlessInput { color: var(--c-error); margin-left: 10px; } + + &__error-icon-container { + width: 20px; + height: 20px; + max-width: 20px; + max-height: 20px; + display: flex; + } } &__limit { diff --git a/web/multinode/src/app/components/common/HeaderlessInput.vue b/web/multinode/src/app/components/common/HeaderlessInput.vue index 113c5249e..ea04f704e 100644 --- a/web/multinode/src/app/components/common/HeaderlessInput.vue +++ b/web/multinode/src/app/components/common/HeaderlessInput.vue @@ -23,6 +23,7 @@ :class="{'inputError' : error, 'password': isPassword}" @input="onInput" @change="onInput" + @paste.prevent="onPaste" v-model="value" :placeholder="placeholder" :type="type" @@ -133,6 +134,18 @@ export default class HeaderlessInput extends Vue { this.$emit('setData', this.value); } + public onPaste(event): void { + const clipped: string = event.clipboardData.getData('text'); + + if (clipped.length > this.maxSymbols) { + this.value = clipped.slice(0, this.maxSymbols); + } else { + this.value = clipped; + } + + this.$emit('setData', this.value); + } + /** * Triggers input type between text and password to show/hide symbols. 
*/ diff --git a/web/multinode/src/app/components/common/VDropdown.vue b/web/multinode/src/app/components/common/VDropdown.vue index 4d59f2bab..487a746be 100644 --- a/web/multinode/src/app/components/common/VDropdown.vue +++ b/web/multinode/src/app/components/common/VDropdown.vue @@ -4,14 +4,15 @@ diff --git a/web/multinode/src/app/components/tables/NodesTable.vue b/web/multinode/src/app/components/tables/NodesTable.vue index a6be72dc1..9ccf80b14 100644 --- a/web/multinode/src/app/components/tables/NodesTable.vue +++ b/web/multinode/src/app/components/tables/NodesTable.vue @@ -6,9 +6,16 @@ NODE - DISK SPACE USED - DISK SPACE LEFT - BANDWIDTH USED + + EARNED VERSION STATUS @@ -16,10 +23,17 @@ - {{ node.name }} - {{ node.diskSpaceUsed | bytesToBase10String }} - {{ node.diskSpaceLeft | bytesToBase10String }} - {{ node.bandwidthUsed | bytesToBase10String }} + {{ node.displayedName }} + + {{ node.earned | centsToDollars }} {{ node.version }} {{ node.status }} @@ -35,7 +49,13 @@ import { Node } from '@/nodes'; @Component export default class NodesTable extends Vue { - public nodes: Node[] = []; + public get nodes(): Node[] { + return this.$store.state.nodes.nodes; + } + + public get isSatelliteSelected(): boolean { + return !!this.$store.state.nodes.selectedSatellite; + } } diff --git a/web/multinode/src/app/utils/currency.ts b/web/multinode/src/app/utils/currency.ts index f8a3f6601..d69f36986 100644 --- a/web/multinode/src/app/utils/currency.ts +++ b/web/multinode/src/app/utils/currency.ts @@ -5,6 +5,7 @@ * Size class contains currency related functionality such as convertation. */ export class Currency { + /** * dollarsFromCents converts cents to dollars with prefix. * @param cents count diff --git a/web/multinode/src/app/utils/percentage.ts b/web/multinode/src/app/utils/percentage.ts new file mode 100644 index 000000000..064b3c930 --- /dev/null +++ b/web/multinode/src/app/utils/percentage.ts @@ -0,0 +1,16 @@ +// Copyright (C) 2021 Storj Labs, Inc. 
+// See LICENSE for copying information. + +/** + * Percentage class contains percentage related functionality. + */ +export class Percentage { + + /** + * dollarsFromCents converts cents to dollars with prefix. + * @param number float + */ + public static fromFloat(number: number): string { + return `${parseFloat(`${(number * 100).toFixed(1)}`)}%`; + } +} diff --git a/web/multinode/src/app/views/MyNodes.vue b/web/multinode/src/app/views/MyNodes.vue index 0f6cdafb7..e046bb3a2 100644 --- a/web/multinode/src/app/views/MyNodes.vue +++ b/web/multinode/src/app/views/MyNodes.vue @@ -4,19 +4,61 @@ diff --git a/web/multinode/src/main.ts b/web/multinode/src/main.ts index b310b8b3a..5779f8f68 100644 --- a/web/multinode/src/main.ts +++ b/web/multinode/src/main.ts @@ -1,19 +1,45 @@ // Copyright (C) 2020 Storj Labs, Inc. // See LICENSE for copying information. -import Vue from 'vue'; +import Vue, { VNode } from 'vue'; import Router from 'vue-router'; +import { DirectiveBinding } from 'vue/types/options'; import App from '@/app/App.vue'; import { router } from '@/app/router'; import { store } from '@/app/store'; import { Currency } from '@/app/utils/currency'; -import { Size } from '@/app/utils/size'; +import { Percentage } from '@/app/utils/percentage'; +import { Size } from '@/private/memory/size'; Vue.config.productionTip = false; Vue.use(Router); +let clickOutsideEvent: EventListener; + +/** + * Binds closing action to outside popups area. 
+ */ +Vue.directive('click-outside', { + bind: function (el: HTMLElement, binding: DirectiveBinding, vnode: VNode) { + clickOutsideEvent = function(event: Event): void { + if (el === event.target || el.contains((event.target as Node))) { + return; + } + + if (vnode.context) { + vnode.context[binding.expression](event); + } + }; + + document.body.addEventListener('click', clickOutsideEvent); + }, + unbind: function(): void { + document.body.removeEventListener('click', clickOutsideEvent); + }, +}); + /** * centsToDollars is a Vue filter that converts amount of cents in dollars string. */ @@ -28,6 +54,13 @@ Vue.filter('bytesToBase10String', (amountInBytes: number): string => { return Size.toBase10String(amountInBytes); }); +/** + * Converts float number to percents. + */ +Vue.filter('floatToPercentage', (number: number): string => { + return Percentage.fromFloat(number); +}); + const app = new Vue({ router, store, diff --git a/web/multinode/src/private/http/client.ts b/web/multinode/src/private/http/client.ts new file mode 100644 index 000000000..a6ec9db4e --- /dev/null +++ b/web/multinode/src/private/http/client.ts @@ -0,0 +1,73 @@ +// Copyright (C) 2021 Storj Labs, Inc. +// See LICENSE for copying information. + +/** + * HttpClient is a custom wrapper around fetch api. + * Exposes get, post and delete methods for JSON strings. + */ +export class HttpClient { + /** + * Performs POST http request with JSON body. + * @param path + * @param body serialized JSON + */ + public async post(path: string, body: string | null): Promise { + return this.do('POST', path, body); + } + + /** + * Performs PATCH http request with JSON body. + * @param path + * @param body serialized JSON + */ + public async patch(path: string, body: string | null): Promise { + return this.do('PATCH', path, body); + } + + /** + * Performs PUT http request with JSON body. 
+ * @param path + * @param body serialized JSON + * @param auth indicates if authentication is needed + */ + public async put(path: string, body: string | null, auth: boolean = true): Promise { + return this.do('PUT', path, body); + } + + /** + * Performs GET http request. + * @param path + * @param auth indicates if authentication is needed + */ + public async get(path: string, auth: boolean = true): Promise { + return this.do('GET', path, null); + } + + /** + * Performs DELETE http request. + * @param path + * @param auth indicates if authentication is needed + */ + public async delete(path: string, auth: boolean = true): Promise { + return this.do('DELETE', path, null); + } + + /** + * do sends an HTTP request and returns an HTTP response as configured on the client. + * @param method holds http method type + * @param path + * @param body serialized JSON + */ + private async do(method: string, path: string, body: string | null): Promise { + const request: RequestInit = { + method: method, + body: body, + }; + + request.headers = { + 'Content-Type': 'application/json', + }; + + return await fetch(path, request); + } +} diff --git a/web/multinode/src/app/utils/size.ts b/web/multinode/src/private/memory/size.ts similarity index 100% rename from web/multinode/src/app/utils/size.ts rename to web/multinode/src/private/memory/size.ts diff --git a/web/multinode/vue.config.js b/web/multinode/vue.config.js index 9429dc56f..08f246cbd 100644 --- a/web/multinode/vue.config.js +++ b/web/multinode/vue.config.js @@ -5,7 +5,7 @@ const path = require('path'); const StyleLintPlugin = require('stylelint-webpack-plugin'); module.exports = { - // publicPath: "/static/dist", + publicPath: "/static/dist", productionSourceMap: false, parallel: true, configureWebpack: { From d2148edcdbe136525abc3231c5dd064703ce9cff Mon Sep 17 00:00:00 2001 From: Caleb Case Date: Tue, 26 Jan 2021 14:00:52 -0500 Subject: [PATCH 21/38] cmd/uplink/cmd/setup.go: Access Grant vs API Key error messaging Provide 
a clearer error message to users who confuse the API Key with the Access Grant and suggest the right command to them. Change-Id: If73ae8cde140b68a19f4cfc3f59bb88a3b74c9c1 --- cmd/uplink/cmd/setup.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/uplink/cmd/setup.go b/cmd/uplink/cmd/setup.go index c3717c2bb..77d9f10da 100644 --- a/cmd/uplink/cmd/setup.go +++ b/cmd/uplink/cmd/setup.go @@ -9,12 +9,14 @@ import ( "path/filepath" "github.com/spf13/cobra" + "github.com/zeebo/errs" "storj.io/private/cfgstruct" "storj.io/private/process" "storj.io/storj/cmd/internal/wizard" "storj.io/uplink" "storj.io/uplink/backcomp" + "storj.io/uplink/private/access2" ) var ( @@ -108,7 +110,12 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) { access, err = backcomp.RequestAccessWithPassphraseAndConcurrency(ctx, uplinkConfig, satelliteAddress, apiKeyString, passphrase, uint8(setupCfg.PBKDFConcurrency)) } if err != nil { - return Error.Wrap(err) + _, err2 := access2.ParseAccess(apiKeyString) + if err2 == nil { + err2 = Error.New("API key appears to be an access grant: try running `uplink import` instead") + } + + return errs.Combine(err, err2) } accessData, err := access.Serialize() if err != nil { From f18cb24522440c6658908047922b2df38f016bd0 Mon Sep 17 00:00:00 2001 From: Stefan Benten Date: Wed, 27 Jan 2021 15:34:23 +0100 Subject: [PATCH 22/38] .clabot: add harrymaurya05 and gregoirevda (#4032) Adding both as they signed the CLA. 
--- .clabot | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.clabot b/.clabot index 85d69c17c..00a27fd41 100644 --- a/.clabot +++ b/.clabot @@ -66,6 +66,8 @@ "dominickmarino", "hectorj2f", "nergdron", - "Doom4535" + "Doom4535", + "harrymaurya05", + "gregoirevda" ] } From 02845e7b8f36f77a4f04425f372c9d59f34ea0b2 Mon Sep 17 00:00:00 2001 From: Yingrong Zhao Date: Tue, 19 Jan 2021 11:33:50 -0500 Subject: [PATCH 23/38] pkg/server,private/testplanet: start to listen on quic This PR introduces a new listener that can listen for quic traffic on both storagenodes and satellites. Change-Id: I5eb5bc82c37dde20d3be2ec8fa5f69c18fae0af0 --- go.sum | 1 + pkg/quic/conn.go | 4 +- pkg/quic/connector.go | 2 +- pkg/server/listener.go | 56 ++++++++-- pkg/server/server.go | 36 ++++-- private/testplanet/rpc_test.go | 193 +++++++++++++++++++-------------- 6 files changed, 188 insertions(+), 104 deletions(-) diff --git a/go.sum b/go.sum index c784905ef..15e17311f 100644 --- a/go.sum +++ b/go.sum @@ -147,6 +147,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= diff --git a/pkg/quic/conn.go b/pkg/quic/conn.go index e777627e4..d884a7ffd 100644 --- a/pkg/quic/conn.go +++ b/pkg/quic/conn.go @@ -174,9 +174,9 @@ type closeTrackingConn struct { rpc.ConnectorConn } -// trackClose wraps the conn and sets a finalizer on the 
returned value to +// TrackClose wraps the conn and sets a finalizer on the returned value to // close the conn and monitor that it was leaked. -func trackClose(conn rpc.ConnectorConn) rpc.ConnectorConn { +func TrackClose(conn rpc.ConnectorConn) rpc.ConnectorConn { tracked := &closeTrackingConn{ConnectorConn: conn} runtime.SetFinalizer(tracked, (*closeTrackingConn).finalize) return tracked diff --git a/pkg/quic/connector.go b/pkg/quic/connector.go index 26c73fa93..6b3edc2d8 100644 --- a/pkg/quic/connector.go +++ b/pkg/quic/connector.go @@ -61,7 +61,7 @@ func (c Connector) DialContext(ctx context.Context, tlsConfig *tls.Config, addre } return &timedConn{ - ConnectorConn: trackClose(conn), + ConnectorConn: TrackClose(conn), rate: c.transferRate, }, nil } diff --git a/pkg/server/listener.go b/pkg/server/listener.go index 1cf67e425..fd55a6d3c 100644 --- a/pkg/server/listener.go +++ b/pkg/server/listener.go @@ -10,6 +10,8 @@ import ( "github.com/zeebo/errs" "storj.io/common/netutil" + "storj.io/common/rpc" + "storj.io/storj/pkg/quic" ) // defaultUserTimeout is the value we use for the TCP_USER_TIMEOUT setting. @@ -19,24 +21,27 @@ const defaultUserTimeout = 60 * time.Second // and monitors if the returned connections are closed or leaked. func wrapListener(lis net.Listener) net.Listener { if lis, ok := lis.(*net.TCPListener); ok { - return newUserTimeoutListener(lis) + return newTCPUserTimeoutListener(lis) + } + if lis, ok := lis.(*quic.Listener); ok { + return newQUICTrackedListener(lis) } return lis } -// userTimeoutListener wraps a tcp listener so that it sets the TCP_USER_TIMEOUT +// tcpUserTimeoutListener wraps a tcp listener so that it sets the TCP_USER_TIMEOUT // value for each socket it returns. -type userTimeoutListener struct { +type tcpUserTimeoutListener struct { lis *net.TCPListener } -// newUserTimeoutListener wraps the tcp listener in a userTimeoutListener. 
-func newUserTimeoutListener(lis *net.TCPListener) *userTimeoutListener { - return &userTimeoutListener{lis: lis} +// newTCPUserTimeoutListener wraps the tcp listener in a userTimeoutListener. +func newTCPUserTimeoutListener(lis *net.TCPListener) *tcpUserTimeoutListener { + return &tcpUserTimeoutListener{lis: lis} } // Accept waits for and returns the next connection to the listener. -func (lis *userTimeoutListener) Accept() (net.Conn, error) { +func (lis *tcpUserTimeoutListener) Accept() (net.Conn, error) { conn, err := lis.lis.AcceptTCP() if err != nil { return nil, err @@ -50,11 +55,44 @@ func (lis *userTimeoutListener) Accept() (net.Conn, error) { // Close closes the listener. // Any blocked Accept operations will be unblocked and return errors. -func (lis *userTimeoutListener) Close() error { +func (lis *tcpUserTimeoutListener) Close() error { return lis.lis.Close() } // Addr returns the listener's network address. -func (lis *userTimeoutListener) Addr() net.Addr { +func (lis *tcpUserTimeoutListener) Addr() net.Addr { + return lis.lis.Addr() +} + +type quicTrackedListener struct { + lis *quic.Listener +} + +func newQUICTrackedListener(lis *quic.Listener) *quicTrackedListener { + return &quicTrackedListener{lis: lis} +} + +func (lis *quicTrackedListener) Accept() (net.Conn, error) { + conn, err := lis.lis.Accept() + if err != nil { + return nil, err + } + + connectorConn, ok := conn.(rpc.ConnectorConn) + if !ok { + return nil, Error.New("quic connection doesn't implement required methods") + } + + return quic.TrackClose(connectorConn), nil +} + +// Close closes the listener. +// Any blocked Accept operations will be unblocked and return errors. +func (lis *quicTrackedListener) Close() error { + return lis.lis.Close() +} + +// Addr returns the listener's network address. 
+func (lis *quicTrackedListener) Addr() net.Addr { return lis.lis.Addr() } diff --git a/pkg/server/server.go b/pkg/server/server.go index 151f1b739..cad39437a 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -9,6 +9,7 @@ import ( "net" "sync" + quicgo "github.com/lucas-clemente/quic-go" "github.com/zeebo/errs" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -21,6 +22,7 @@ import ( "storj.io/drpc/drpcserver" jaeger "storj.io/monkit-jaeger" "storj.io/storj/pkg/listenmux" + "storj.io/storj/pkg/quic" ) // Config holds server specific configuration parameters. @@ -33,9 +35,10 @@ type Config struct { } type public struct { - listener net.Listener - drpc *drpcserver.Server - mux *drpcmux.Mux + tcpListener net.Listener + quicListener net.Listener + drpc *drpcserver.Server + mux *drpcmux.Mux } type private struct { @@ -71,22 +74,28 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s Manager: rpc.NewDefaultManagerOptions(), } - publicListener, err := net.Listen("tcp", publicAddr) + publicTCPListener, err := net.Listen("tcp", publicAddr) if err != nil { return nil, err } + publicQUICListener, err := quic.NewListener(tlsOptions.ServerTLSConfig(), publicTCPListener.Addr().String(), &quicgo.Config{MaxIdleTimeout: defaultUserTimeout}) + if err != nil { + return nil, errs.Combine(err, publicTCPListener.Close()) + } + publicMux := drpcmux.New() publicTracingHandler := rpctracing.NewHandler(publicMux, jaeger.RemoteTraceHandler) server.public = public{ - listener: wrapListener(publicListener), - drpc: drpcserver.NewWithOptions(publicTracingHandler, serverOptions), - mux: publicMux, + tcpListener: wrapListener(publicTCPListener), + quicListener: wrapListener(publicQUICListener), + drpc: drpcserver.NewWithOptions(publicTracingHandler, serverOptions), + mux: publicMux, } privateListener, err := net.Listen("tcp", privateAddr) if err != nil { - return nil, errs.Combine(err, publicListener.Close()) + return nil, errs.Combine(err, 
publicTCPListener.Close(), publicQUICListener.Close()) } privateMux := drpcmux.New() privateTracingHandler := rpctracing.NewHandler(privateMux, jaeger.RemoteTraceHandler) @@ -103,7 +112,7 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s func (p *Server) Identity() *identity.FullIdentity { return p.tlsOptions.Ident } // Addr returns the server's public listener address. -func (p *Server) Addr() net.Addr { return p.public.listener.Addr() } +func (p *Server) Addr() net.Addr { return p.public.tcpListener.Addr() } // PrivateAddr returns the server's private listener address. func (p *Server) PrivateAddr() net.Addr { return p.private.listener.Addr() } @@ -127,7 +136,8 @@ func (p *Server) Close() error { // We ignore these errors because there's not really anything to do // even if they happen, and they'll just be errors due to duplicate // closes anyway. - _ = p.public.listener.Close() + _ = p.public.quicListener.Close() + _ = p.public.tcpListener.Close() _ = p.private.listener.Close() return nil } @@ -156,7 +166,7 @@ func (p *Server) Run(ctx context.Context) (err error) { // a chance to be notified that they're done running. 
const drpcHeader = "DRPC!!!1" - publicMux := listenmux.New(p.public.listener, len(drpcHeader)) + publicMux := listenmux.New(p.public.tcpListener, len(drpcHeader)) publicDRPCListener := tls.NewListener(publicMux.Route(drpcHeader), p.tlsOptions.ServerTLSConfig()) privateMux := listenmux.New(p.private.listener, len(drpcHeader)) @@ -197,6 +207,10 @@ func (p *Server) Run(ctx context.Context) (err error) { defer cancel() return p.public.drpc.Serve(ctx, publicDRPCListener) }) + group.Go(func() error { + defer cancel() + return p.public.drpc.Serve(ctx, p.public.quicListener) + }) group.Go(func() error { defer cancel() return p.private.drpc.Serve(ctx, privateDRPCListener) diff --git a/private/testplanet/rpc_test.go b/private/testplanet/rpc_test.go index 8df2de715..ad14533c6 100644 --- a/private/testplanet/rpc_test.go +++ b/private/testplanet/rpc_test.go @@ -18,6 +18,7 @@ import ( "storj.io/common/rpc" "storj.io/common/storj" "storj.io/common/testcontext" + "storj.io/storj/pkg/quic" "storj.io/storj/private/testplanet" "storj.io/storj/satellite" "storj.io/storj/storagenode" @@ -43,82 +44,99 @@ func TestDialNodeURL(t *testing.T) { }, nil) require.NoError(t, err) - dialer := rpc.NewDefaultDialer(tlsOptions) + tcpDialer := rpc.NewDefaultDialer(tlsOptions) + quicDialer := rpc.NewDefaultDialer(tlsOptions) + quicDialer.Connector = quic.NewDefaultConnector(nil) unsignedClientOpts, err := tlsopts.NewOptions(unsignedIdent, tlsopts.Config{ PeerIDVersions: "*", }, nil) require.NoError(t, err) - unsignedDialer := rpc.NewDefaultDialer(unsignedClientOpts) + unsignedTCPDialer := rpc.NewDefaultDialer(unsignedClientOpts) + unsignedQUICDialer := rpc.NewDefaultDialer(unsignedClientOpts) + unsignedQUICDialer.Connector = quic.NewDefaultConnector(nil) - t.Run("DialNodeURL with invalid targets", func(t *testing.T) { - targets := []storj.NodeURL{ - { - ID: storj.NodeID{}, - Address: "", - }, - { - ID: storj.NodeID{123}, - Address: "127.0.0.1:100", - }, - { - ID: storj.NodeID{}, - Address: 
planet.StorageNodes[1].Addr(), - }, - } + test := func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, dialer rpc.Dialer, unsignedDialer rpc.Dialer) { + t.Run("DialNodeURL with invalid targets", func(t *testing.T) { + targets := []storj.NodeURL{ + { + ID: storj.NodeID{}, + Address: "", + }, + { + ID: storj.NodeID{123}, + Address: "127.0.0.1:100", + }, + { + ID: storj.NodeID{}, + Address: planet.StorageNodes[1].Addr(), + }, + } - for _, target := range targets { - tag := fmt.Sprintf("%+v", target) + for _, target := range targets { + tag := fmt.Sprintf("%+v", target) + timedCtx, cancel := context.WithTimeout(ctx, time.Second) + conn, err := dialer.DialNodeURL(timedCtx, target) + cancel() + assert.Error(t, err, tag) + assert.Nil(t, conn, tag) + } + }) + + t.Run("DialNode with valid signed target", func(t *testing.T) { timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := dialer.DialNodeURL(timedCtx, target) + conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) cancel() - assert.Error(t, err, tag) - assert.Nil(t, conn, tag) - } + + assert.NoError(t, err) + require.NotNil(t, conn) + + assert.NoError(t, conn.Close()) + }) + + t.Run("DialNode with unsigned identity", func(t *testing.T) { + timedCtx, cancel := context.WithTimeout(ctx, time.Second) + conn, err := unsignedDialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) + cancel() + + assert.NotNil(t, conn) + require.NoError(t, err) + assert.NoError(t, conn.Close()) + }) + + t.Run("DialAddress with unsigned identity", func(t *testing.T) { + timedCtx, cancel := context.WithTimeout(ctx, time.Second) + conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr()) + cancel() + + assert.NotNil(t, conn) + require.NoError(t, err) + assert.NoError(t, conn.Close()) + }) + + t.Run("DialAddress with valid address", func(t *testing.T) { + timedCtx, cancel := context.WithTimeout(ctx, time.Second) + conn, err := 
dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr()) + cancel() + + assert.NoError(t, err) + require.NotNil(t, conn) + assert.NoError(t, conn.Close()) + }) + + } + + // test with tcp + t.Run("TCP", func(t *testing.T) { + test(t, ctx, planet, tcpDialer, unsignedTCPDialer) + }) + // test with quic + t.Run("QUIC", func(t *testing.T) { + test(t, ctx, planet, quicDialer, unsignedQUICDialer) }) - t.Run("DialNode with valid signed target", func(t *testing.T) { - timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) - cancel() - - assert.NoError(t, err) - require.NotNil(t, conn) - - assert.NoError(t, conn.Close()) - }) - - t.Run("DialNode with unsigned identity", func(t *testing.T) { - timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := unsignedDialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) - cancel() - - assert.NotNil(t, conn) - require.NoError(t, err) - assert.NoError(t, conn.Close()) - }) - - t.Run("DialAddress with unsigned identity", func(t *testing.T) { - timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr()) - cancel() - - assert.NotNil(t, conn) - require.NoError(t, err) - assert.NoError(t, conn.Close()) - }) - - t.Run("DialAddress with valid address", func(t *testing.T) { - timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr()) - cancel() - - assert.NoError(t, err) - require.NotNil(t, conn) - assert.NoError(t, conn.Close()) - }) }) } @@ -150,27 +168,40 @@ func TestDialNode_BadServerCertificate(t *testing.T) { }, nil) require.NoError(t, err) - dialer := rpc.NewDefaultDialer(tlsOptions) + tcpDialer := rpc.NewDefaultDialer(tlsOptions) + quicDialer := rpc.NewDefaultDialer(tlsOptions) + quicDialer.Connector = quic.NewDefaultConnector(nil) - t.Run("DialNodeURL 
with bad server certificate", func(t *testing.T) { - timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) - cancel() + test := func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, dialer rpc.Dialer) { + t.Run("DialNodeURL with bad server certificate", func(t *testing.T) { + timedCtx, cancel := context.WithTimeout(ctx, time.Second) + conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) + cancel() - tag := fmt.Sprintf("%+v", planet.StorageNodes[1].NodeURL()) - assert.Nil(t, conn, tag) - require.Error(t, err, tag) - assert.Contains(t, err.Error(), "not signed by any CA in the whitelist") + tag := fmt.Sprintf("%+v", planet.StorageNodes[1].NodeURL()) + assert.Nil(t, conn, tag) + require.Error(t, err, tag) + assert.Contains(t, err.Error(), "not signed by any CA in the whitelist") + }) + + t.Run("DialAddress with bad server certificate", func(t *testing.T) { + timedCtx, cancel := context.WithTimeout(ctx, time.Second) + conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) + cancel() + + assert.Nil(t, conn) + require.Error(t, err) + assert.Contains(t, err.Error(), "not signed by any CA in the whitelist") + }) + } + + // test with tcp + t.Run("TCP", func(t *testing.T) { + test(t, ctx, planet, tcpDialer) }) - - t.Run("DialAddress with bad server certificate", func(t *testing.T) { - timedCtx, cancel := context.WithTimeout(ctx, time.Second) - conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL()) - cancel() - - assert.Nil(t, conn) - require.Error(t, err) - assert.Contains(t, err.Error(), "not signed by any CA in the whitelist") + // test with quic + t.Run("QUIC", func(t *testing.T) { + test(t, ctx, planet, quicDialer) }) }) } From 66e15fb7f129bf838e03022e640123cb180d2347 Mon Sep 17 00:00:00 2001 From: Jeff Wendling Date: Thu, 14 Jan 2021 11:15:56 -0500 Subject: [PATCH 24/38] satellite/compensation: remove ytd 
paid amounts they aren't right and we aren't using them. Change-Id: I5ca024e38d055696696886278863e941b5bc51bf --- cmd/satellite/compensation.go | 8 ++------ satellite/compensation/db.go | 3 --- satellite/compensation/invoice.go | 2 +- satellite/satellitedb/compensation.go | 26 -------------------------- 4 files changed, 3 insertions(+), 36 deletions(-) diff --git a/cmd/satellite/compensation.go b/cmd/satellite/compensation.go index 55eb2bfe5..dc467d6da 100644 --- a/cmd/satellite/compensation.go +++ b/cmd/satellite/compensation.go @@ -12,6 +12,7 @@ import ( "github.com/zeebo/errs" "go.uber.org/zap" + "storj.io/storj/private/currency" "storj.io/storj/satellite/compensation" "storj.io/storj/satellite/satellitedb" ) @@ -76,11 +77,6 @@ func generateInvoicesCSV(ctx context.Context, period compensation.Period, out io } } - paidYTD, err := db.Compensation().QueryPaidInYear(ctx, usage.NodeID, period.Year) - if err != nil { - return err - } - nodeInfo := compensation.NodeInfo{ ID: usage.NodeID, CreatedAt: node.CreatedAt, @@ -103,7 +99,7 @@ func generateInvoicesCSV(ctx context.Context, period compensation.Period, out io NodeWallet: node.Operator.Wallet, NodeAddress: nodeAddress, NodeLastIP: nodeLastIP, - PaidYTD: paidYTD, + PaidYTD: currency.Zero, // deprecated } if err := invoice.MergeNodeInfo(nodeInfo); err != nil { diff --git a/satellite/compensation/db.go b/satellite/compensation/db.go index c84140a47..32ae38583 100644 --- a/satellite/compensation/db.go +++ b/satellite/compensation/db.go @@ -21,9 +21,6 @@ type DB interface { // QueryWithheldAmounts queries the WithheldAmounts for the given nodeID. QueryWithheldAmounts(ctx context.Context, nodeID storj.NodeID) (WithheldAmounts, error) - // QueryPaidInYear returns the total amount paid to the nodeID in the provided year. - QueryPaidInYear(ctx context.Context, nodeID storj.NodeID, year int) (currency.MicroUnit, error) - // RecordPeriod records a set of paystubs and payments for some time period. 
RecordPeriod(ctx context.Context, paystubs []Paystub, payments []Payment) error diff --git a/satellite/compensation/invoice.go b/satellite/compensation/invoice.go index 335985f59..d966ec7bd 100644 --- a/satellite/compensation/invoice.go +++ b/satellite/compensation/invoice.go @@ -40,7 +40,7 @@ type Invoice struct { Disposed currency.MicroUnit `csv:"disposed"` // Amount of owed that is due to graceful-exit or held period ending TotalHeld currency.MicroUnit `csv:"total-held"` // Total amount ever held from the node TotalDisposed currency.MicroUnit `csv:"total-disposed"` // Total amount ever disposed to the node - PaidYTD currency.MicroUnit `csv:"paid-ytd"` // Total amount paid so far this year (not including this period) + PaidYTD currency.MicroUnit `csv:"paid-ytd"` // Deprecated } // MergeNodeInfo updates the fields representing the node information into the invoice. diff --git a/satellite/satellitedb/compensation.go b/satellite/satellitedb/compensation.go index 96bc93ec8..1b1ed592d 100644 --- a/satellite/satellitedb/compensation.go +++ b/satellite/satellitedb/compensation.go @@ -5,7 +5,6 @@ package satellitedb import ( "context" - "fmt" "storj.io/common/storj" "storj.io/storj/private/currency" @@ -17,31 +16,6 @@ type compensationDB struct { db *satelliteDB } -func (comp *compensationDB) QueryPaidInYear(ctx context.Context, nodeID storj.NodeID, year int) (totalPaid currency.MicroUnit, err error) { - defer mon.Task()(&ctx)(&err) - - start := fmt.Sprintf("%04d-01", year) - endExclusive := fmt.Sprintf("%04d-01", year+1) - - stmt := comp.db.Rebind(` - SELECT - coalesce(SUM(amount), 0) AS sum_paid - FROM - storagenode_payments - WHERE - node_id = ? - AND - period >= ? AND period < ? - `) - - var sumPaid int64 - if err := comp.db.DB.QueryRow(ctx, stmt, nodeID, start, endExclusive).Scan(&sumPaid); err != nil { - return currency.Zero, Error.Wrap(err) - } - - return currency.NewMicroUnit(sumPaid), nil -} - // QueryWithheldAmounts returns withheld data for the given node. 
func (comp *compensationDB) QueryWithheldAmounts(ctx context.Context, nodeID storj.NodeID) (_ compensation.WithheldAmounts, err error) { defer mon.Task()(&ctx)(&err) From ca86820b8bcf02b0d65854cf81af859197c60625 Mon Sep 17 00:00:00 2001 From: Jeff Wendling Date: Fri, 15 Jan 2021 11:23:19 -0500 Subject: [PATCH 25/38] satellite/snopayouts: use dbx + some refactorings Change-Id: I8f3973d2377f071bcea2f61e0fc21d913ffa7ea8 --- satellite/api.go | 14 +- satellite/peer.go | 2 +- satellite/satellitedb/database.go | 6 +- satellite/satellitedb/dbx/satellitedb.dbx | 24 ++ satellite/satellitedb/dbx/satellitedb.dbx.go | 388 +++++++++++++++++++ satellite/satellitedb/payouts.go | 277 ++++++------- satellite/snopayouts/endpoint.go | 194 ++++------ satellite/snopayouts/payouts.go | 50 +-- satellite/snopayouts/payouts_test.go | 189 +++------ 9 files changed, 708 insertions(+), 436 deletions(-) diff --git a/satellite/api.go b/satellite/api.go index 093a27964..19961c0a3 100644 --- a/satellite/api.go +++ b/satellite/api.go @@ -156,7 +156,7 @@ type API struct { Endpoint *nodestats.Endpoint } - SnoPayout struct { + SNOPayouts struct { Endpoint *snopayouts.Endpoint Service *snopayouts.Service DB snopayouts.DB @@ -654,16 +654,16 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, } { // setup SnoPayout endpoint - peer.SnoPayout.DB = peer.DB.SnoPayout() - peer.SnoPayout.Service = snopayouts.NewService( + peer.SNOPayouts.DB = peer.DB.SNOPayouts() + peer.SNOPayouts.Service = snopayouts.NewService( peer.Log.Named("payouts:service"), - peer.SnoPayout.DB) - peer.SnoPayout.Endpoint = snopayouts.NewEndpoint( + peer.SNOPayouts.DB) + peer.SNOPayouts.Endpoint = snopayouts.NewEndpoint( peer.Log.Named("payouts:endpoint"), peer.DB.StoragenodeAccounting(), peer.Overlay.DB, - peer.SnoPayout.Service) - if err := pb.DRPCRegisterHeldAmount(peer.Server.DRPC(), peer.SnoPayout.Endpoint); err != nil { + peer.SNOPayouts.Service) + if err := pb.DRPCRegisterHeldAmount(peer.Server.DRPC(), 
peer.SNOPayouts.Endpoint); err != nil { return nil, errs.Combine(err, peer.Close()) } } diff --git a/satellite/peer.go b/satellite/peer.go index e8cff2cf9..bf9c3da6f 100644 --- a/satellite/peer.go +++ b/satellite/peer.go @@ -98,7 +98,7 @@ type DB interface { // StripeCoinPayments returns stripecoinpayments database. StripeCoinPayments() stripecoinpayments.DB // SnoPayout returns database for payouts. - SnoPayout() snopayouts.DB + SNOPayouts() snopayouts.DB // Compoensation tracks storage node compensation Compensation() compensation.DB // Revocation tracks revoked macaroons diff --git a/satellite/satellitedb/database.go b/satellite/satellitedb/database.go index 56e18beed..868cfe5a5 100644 --- a/satellite/satellitedb/database.go +++ b/satellite/satellitedb/database.go @@ -277,9 +277,9 @@ func (dbc *satelliteDBCollection) StripeCoinPayments() stripecoinpayments.DB { return &stripeCoinPaymentsDB{db: dbc.getByName("stripecoinpayments")} } -// SnoPayout returns database for storagenode payStubs and payments info. -func (dbc *satelliteDBCollection) SnoPayout() snopayouts.DB { - return &paymentStubs{db: dbc.getByName("snopayouts")} +// SNOPayouts returns database for storagenode payStubs and payments info. +func (dbc *satelliteDBCollection) SNOPayouts() snopayouts.DB { + return &snopayoutsDB{db: dbc.getByName("snopayouts")} } // Compenstation returns database for storage node compensation. diff --git a/satellite/satellitedb/dbx/satellitedb.dbx b/satellite/satellitedb/dbx/satellitedb.dbx index dd9051add..5c4470c6c 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx +++ b/satellite/satellitedb/dbx/satellitedb.dbx @@ -778,6 +778,17 @@ model storagenode_paystub ( create storagenode_paystub ( noreturn ) +read one ( + select storagenode_paystub + where storagenode_paystub.node_id = ? + where storagenode_paystub.period = ? +) + +read all ( + select storagenode_paystub + where storagenode_paystub.node_id = ? 
+) + model storagenode_payment ( key id @@ -794,6 +805,19 @@ model storagenode_payment ( create storagenode_payment ( noreturn ) +read limitoffset ( + select storagenode_payment + where storagenode_payment.node_id = ? + where storagenode_payment.period = ? + orderby desc storagenode_payment.id +) + +read all ( + select storagenode_payment + where storagenode_payment.node_id = ? +) + + //--- peer_identity ---// model peer_identity ( diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index ab2054ac7..ba8e748cf 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -12390,6 +12390,169 @@ func (obj *pgxImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqua } +func (obj *pgxImpl) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field, + storagenode_paystub_period StoragenodePaystub_Period_Field) ( + storagenode_paystub *StoragenodePaystub, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ? 
AND storagenode_paystubs.period = ?") + + var __values []interface{} + __values = append(__values, storagenode_paystub_node_id.value(), storagenode_paystub_period.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + storagenode_paystub = &StoragenodePaystub{} + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid) + if err != nil { + return (*StoragenodePaystub)(nil), obj.makeErr(err) + } + return storagenode_paystub, nil + +} + +func (obj *pgxImpl) All_StoragenodePaystub_By_NodeId(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) ( + rows []*StoragenodePaystub, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, 
storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ?") + + var __values []interface{} + __values = append(__values, storagenode_paystub_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + for { + rows, err = func() (rows []*StoragenodePaystub, err error) { + __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...) + if err != nil { + return nil, err + } + defer __rows.Close() + + for __rows.Next() { + storagenode_paystub := &StoragenodePaystub{} + err = __rows.Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid) + if err != nil { + return nil, err + } + rows = append(rows, storagenode_paystub) + } + if err := __rows.Err(); err != nil { + return nil, err + } + return rows, nil + }() + if err != nil { + if obj.shouldRetry(err) { + continue + } + return nil, obj.makeErr(err) + } + return rows, nil + } + +} + +func (obj *pgxImpl) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field, + storagenode_payment_period StoragenodePayment_Period_Field, + limit int, offset int64) ( + rows []*StoragenodePayment, err error) { + defer 
mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ? ORDER BY storagenode_payments.id DESC LIMIT ? OFFSET ?") + + var __values []interface{} + __values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value()) + + __values = append(__values, limit, offset) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + for { + rows, err = func() (rows []*StoragenodePayment, err error) { + __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...) + if err != nil { + return nil, err + } + defer __rows.Close() + + for __rows.Next() { + storagenode_payment := &StoragenodePayment{} + err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes) + if err != nil { + return nil, err + } + rows = append(rows, storagenode_payment) + } + err = __rows.Err() + if err != nil { + return nil, err + } + return rows, nil + }() + if err != nil { + if obj.shouldRetry(err) { + continue + } + return nil, obj.makeErr(err) + } + return rows, nil + } + +} + +func (obj *pgxImpl) All_StoragenodePayment_By_NodeId(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field) ( + rows []*StoragenodePayment, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM 
storagenode_payments WHERE storagenode_payments.node_id = ?") + + var __values []interface{} + __values = append(__values, storagenode_payment_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + for { + rows, err = func() (rows []*StoragenodePayment, err error) { + __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...) + if err != nil { + return nil, err + } + defer __rows.Close() + + for __rows.Next() { + storagenode_payment := &StoragenodePayment{} + err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes) + if err != nil { + return nil, err + } + rows = append(rows, storagenode_payment) + } + if err := __rows.Err(); err != nil { + return nil, err + } + return rows, nil + }() + if err != nil { + if obj.shouldRetry(err) { + continue + } + return nil, obj.makeErr(err) + } + return rows, nil + } + +} + func (obj *pgxImpl) Get_PeerIdentity_By_NodeId(ctx context.Context, peer_identity_node_id PeerIdentity_NodeId_Field) ( peer_identity *PeerIdentity, err error) { @@ -19186,6 +19349,169 @@ func (obj *pgxcockroachImpl) All_StoragenodeStorageTally_By_IntervalEndTime_Grea } +func (obj *pgxcockroachImpl) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field, + storagenode_paystub_period StoragenodePaystub_Period_Field) ( + storagenode_paystub *StoragenodePaystub, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, 
storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ? AND storagenode_paystubs.period = ?") + + var __values []interface{} + __values = append(__values, storagenode_paystub_node_id.value(), storagenode_paystub_period.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + storagenode_paystub = &StoragenodePaystub{} + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid) + if err != nil { + return (*StoragenodePaystub)(nil), obj.makeErr(err) + } + return storagenode_paystub, nil + +} + +func (obj *pgxcockroachImpl) All_StoragenodePaystub_By_NodeId(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) ( + rows []*StoragenodePaystub, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, 
storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ?") + + var __values []interface{} + __values = append(__values, storagenode_paystub_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + for { + rows, err = func() (rows []*StoragenodePaystub, err error) { + __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...) 
+ if err != nil { + return nil, err + } + defer __rows.Close() + + for __rows.Next() { + storagenode_paystub := &StoragenodePaystub{} + err = __rows.Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid) + if err != nil { + return nil, err + } + rows = append(rows, storagenode_paystub) + } + if err := __rows.Err(); err != nil { + return nil, err + } + return rows, nil + }() + if err != nil { + if obj.shouldRetry(err) { + continue + } + return nil, obj.makeErr(err) + } + return rows, nil + } + +} + +func (obj *pgxcockroachImpl) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field, + storagenode_payment_period StoragenodePayment_Period_Field, + limit int, offset int64) ( + rows []*StoragenodePayment, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ? ORDER BY storagenode_payments.id DESC LIMIT ? 
OFFSET ?") + + var __values []interface{} + __values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value()) + + __values = append(__values, limit, offset) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + for { + rows, err = func() (rows []*StoragenodePayment, err error) { + __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...) + if err != nil { + return nil, err + } + defer __rows.Close() + + for __rows.Next() { + storagenode_payment := &StoragenodePayment{} + err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes) + if err != nil { + return nil, err + } + rows = append(rows, storagenode_payment) + } + err = __rows.Err() + if err != nil { + return nil, err + } + return rows, nil + }() + if err != nil { + if obj.shouldRetry(err) { + continue + } + return nil, obj.makeErr(err) + } + return rows, nil + } + +} + +func (obj *pgxcockroachImpl) All_StoragenodePayment_By_NodeId(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field) ( + rows []*StoragenodePayment, err error) { + defer mon.Task()(&ctx)(&err) + + var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ?") + + var __values []interface{} + __values = append(__values, storagenode_payment_node_id.value()) + + var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) + obj.logStmt(__stmt, __values...) + + for { + rows, err = func() (rows []*StoragenodePayment, err error) { + __rows, err := obj.driver.QueryContext(ctx, __stmt, __values...) 
+ if err != nil { + return nil, err + } + defer __rows.Close() + + for __rows.Next() { + storagenode_payment := &StoragenodePayment{} + err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes) + if err != nil { + return nil, err + } + rows = append(rows, storagenode_payment) + } + if err := __rows.Err(); err != nil { + return nil, err + } + return rows, nil + }() + if err != nil { + if obj.shouldRetry(err) { + continue + } + return nil, obj.makeErr(err) + } + return rows, nil + } + +} + func (obj *pgxcockroachImpl) Get_PeerIdentity_By_NodeId(ctx context.Context, peer_identity_node_id PeerIdentity_NodeId_Field) ( peer_identity *PeerIdentity, err error) { @@ -22933,6 +23259,26 @@ func (rx *Rx) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart( return tx.All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start) } +func (rx *Rx) All_StoragenodePayment_By_NodeId(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field) ( + rows []*StoragenodePayment, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.All_StoragenodePayment_By_NodeId(ctx, storagenode_payment_node_id) +} + +func (rx *Rx) All_StoragenodePaystub_By_NodeId(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) ( + rows []*StoragenodePaystub, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.All_StoragenodePaystub_By_NodeId(ctx, storagenode_paystub_node_id) +} + func (rx *Rx) All_StoragenodeStorageTally(ctx context.Context) ( rows []*StoragenodeStorageTally, err error) { var tx *Tx @@ -24014,6 +24360,17 @@ func (rx *Rx) Get_SerialNumber_BucketId_By_SerialNumber(ctx context.Context, return 
tx.Get_SerialNumber_BucketId_By_SerialNumber(ctx, serial_number_serial_number) } +func (rx *Rx) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field, + storagenode_paystub_period StoragenodePaystub_Period_Field) ( + storagenode_paystub *StoragenodePaystub, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.Get_StoragenodePaystub_By_NodeId_And_Period(ctx, storagenode_paystub_node_id, storagenode_paystub_period) +} + func (rx *Rx) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context, stripe_customer_user_id StripeCustomer_UserId_Field) ( row *CustomerId_Row, err error) { @@ -24234,6 +24591,18 @@ func (rx *Rx) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx contex return tx.Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less, limit, offset) } +func (rx *Rx) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field, + storagenode_payment_period StoragenodePayment_Period_Field, + limit int, offset int64) ( + rows []*StoragenodePayment, err error) { + var tx *Tx + if tx, err = rx.getTx(ctx); err != nil { + return + } + return tx.Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx, storagenode_payment_node_id, storagenode_payment_period, limit, offset) +} + func (rx *Rx) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context, stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field, limit int, offset int64) ( @@ -24647,6 +25016,14 @@ type Methods interface { storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) ( rows []*StoragenodeBandwidthRollup, err error) + All_StoragenodePayment_By_NodeId(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field) ( + rows []*StoragenodePayment, err 
error) + + All_StoragenodePaystub_By_NodeId(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) ( + rows []*StoragenodePaystub, err error) + All_StoragenodeStorageTally(ctx context.Context) ( rows []*StoragenodeStorageTally, err error) @@ -25155,6 +25532,11 @@ type Methods interface { serial_number_serial_number SerialNumber_SerialNumber_Field) ( row *BucketId_Row, err error) + Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context, + storagenode_paystub_node_id StoragenodePaystub_NodeId_Field, + storagenode_paystub_period StoragenodePaystub_Period_Field) ( + storagenode_paystub *StoragenodePaystub, err error) + Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context, stripe_customer_user_id StripeCustomer_UserId_Field) ( row *CustomerId_Row, err error) @@ -25255,6 +25637,12 @@ type Methods interface { limit int, offset int64) ( rows []*Project, err error) + Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context, + storagenode_payment_node_id StoragenodePayment_NodeId_Field, + storagenode_payment_period StoragenodePayment_Period_Field, + limit int, offset int64) ( + rows []*StoragenodePayment, err error) + Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context, stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field, limit int, offset int64) ( diff --git a/satellite/satellitedb/payouts.go b/satellite/satellitedb/payouts.go index 060e00940..a7df049be 100644 --- a/satellite/satellitedb/payouts.go +++ b/satellite/satellitedb/payouts.go @@ -8,114 +8,155 @@ import ( "database/sql" "errors" - "github.com/zeebo/errs" - "storj.io/common/storj" "storj.io/storj/satellite/satellitedb/dbx" "storj.io/storj/satellite/snopayouts" ) -// paymentStubs is payment data for specific storagenode for some specific period by working with satellite. +// snopayoutsDB is payment data for specific storagenode for some specific period by working with satellite. 
// // architecture: Database -type paymentStubs struct { +type snopayoutsDB struct { db *satelliteDB } // GetPaystub returns payStub by nodeID and period. -func (paystubs *paymentStubs) GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (payStub snopayouts.PayStub, err error) { - query := `SELECT * FROM storagenode_paystubs WHERE node_id = $1 AND period = $2;` - - row := paystubs.db.QueryRowContext(ctx, query, nodeID, period) - err = row.Scan( - &payStub.Period, - &payStub.NodeID, - &payStub.Created, - &payStub.Codes, - &payStub.UsageAtRest, - &payStub.UsageGet, - &payStub.UsagePut, - &payStub.UsageGetRepair, - &payStub.UsagePutRepair, - &payStub.UsageGetAudit, - &payStub.CompAtRest, - &payStub.CompGet, - &payStub.CompPut, - &payStub.CompGetRepair, - &payStub.CompPutRepair, - &payStub.CompGetAudit, - &payStub.SurgePercent, - &payStub.Held, - &payStub.Owed, - &payStub.Disposed, - &payStub.Paid, - ) +func (db *snopayoutsDB) GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (paystub snopayouts.Paystub, err error) { + dbxPaystub, err := db.db.Get_StoragenodePaystub_By_NodeId_And_Period(ctx, + dbx.StoragenodePaystub_NodeId(nodeID.Bytes()), + dbx.StoragenodePaystub_Period(period)) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return snopayouts.PayStub{}, snopayouts.ErrNoDataForPeriod.Wrap(err) + return snopayouts.Paystub{}, snopayouts.ErrNoDataForPeriod.Wrap(err) } - - return snopayouts.PayStub{}, Error.Wrap(err) + return snopayouts.Paystub{}, Error.Wrap(err) } - - return payStub, nil + return convertDBXPaystub(dbxPaystub) } // GetAllPaystubs return all payStubs by nodeID. 
-func (paystubs *paymentStubs) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) (payStubs []snopayouts.PayStub, err error) { - query := `SELECT * FROM storagenode_paystubs WHERE node_id = $1;` - - rows, err := paystubs.db.QueryContext(ctx, query, nodeID) +func (db *snopayoutsDB) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) (paystubs []snopayouts.Paystub, err error) { + dbxPaystubs, err := db.db.All_StoragenodePaystub_By_NodeId(ctx, + dbx.StoragenodePaystub_NodeId(nodeID.Bytes())) if err != nil { - return []snopayouts.PayStub{}, Error.Wrap(err) + return nil, Error.Wrap(err) } - - defer func() { - err = errs.Combine(err, Error.Wrap(rows.Close())) - }() - - for rows.Next() { - paystub := snopayouts.PayStub{} - - err = rows.Scan( - &paystub.Period, - &paystub.NodeID, - &paystub.Created, - &paystub.Codes, - &paystub.UsageAtRest, - &paystub.UsageGet, - &paystub.UsagePut, - &paystub.UsageGetRepair, - &paystub.UsagePutRepair, - &paystub.UsageGetAudit, - &paystub.CompAtRest, - &paystub.CompGet, - &paystub.CompPut, - &paystub.CompGetRepair, - &paystub.CompPutRepair, - &paystub.CompGetAudit, - &paystub.SurgePercent, - &paystub.Held, - &paystub.Owed, - &paystub.Disposed, - &paystub.Paid, - ) - if err = rows.Err(); err != nil { - return []snopayouts.PayStub{}, Error.Wrap(err) + for _, dbxPaystub := range dbxPaystubs { + payStub, err := convertDBXPaystub(dbxPaystub) + if err != nil { + return nil, Error.Wrap(err) } - - payStubs = append(payStubs, paystub) + paystubs = append(paystubs, payStub) } - - return payStubs, Error.Wrap(rows.Err()) + return paystubs, nil } -// CreatePaystub inserts storagenode_paystub into database. 
-func (paystubs *paymentStubs) CreatePaystub(ctx context.Context, stub snopayouts.PayStub) (err error) { - return paystubs.db.CreateNoReturn_StoragenodePaystub( - ctx, +func convertDBXPaystub(dbxPaystub *dbx.StoragenodePaystub) (snopayouts.Paystub, error) { + nodeID, err := storj.NodeIDFromBytes(dbxPaystub.NodeId) + if err != nil { + return snopayouts.Paystub{}, Error.Wrap(err) + } + return snopayouts.Paystub{ + Period: dbxPaystub.Period, + NodeID: nodeID, + Created: dbxPaystub.CreatedAt, + Codes: dbxPaystub.Codes, + UsageAtRest: dbxPaystub.UsageAtRest, + UsageGet: dbxPaystub.UsageGet, + UsagePut: dbxPaystub.UsagePut, + UsageGetRepair: dbxPaystub.UsageGetRepair, + UsagePutRepair: dbxPaystub.UsagePutRepair, + UsageGetAudit: dbxPaystub.UsageGetAudit, + CompAtRest: dbxPaystub.CompAtRest, + CompGet: dbxPaystub.CompGet, + CompPut: dbxPaystub.CompPut, + CompGetRepair: dbxPaystub.CompGetRepair, + CompPutRepair: dbxPaystub.CompPutRepair, + CompGetAudit: dbxPaystub.CompGetAudit, + SurgePercent: dbxPaystub.SurgePercent, + Held: dbxPaystub.Held, + Owed: dbxPaystub.Owed, + Disposed: dbxPaystub.Disposed, + Paid: dbxPaystub.Paid, + }, nil +} + +// GetPayment returns payment by nodeID and period. +func (db *snopayoutsDB) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (payment snopayouts.Payment, err error) { + // N.B. There can be multiple payments for a single node id and period, but the old query + // here did not take that into account. Indeed, all above layers do not take it into account + // from the service endpoints to the protobuf rpcs to the node client side. Instead of fixing + // all of those things now, emulate the behavior with dbx as much as possible. 
+ + dbxPayments, err := db.db.Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx, + dbx.StoragenodePayment_NodeId(nodeID.Bytes()), + dbx.StoragenodePayment_Period(period), + 1, 0) + if err != nil { + return snopayouts.Payment{}, Error.Wrap(err) + } + + switch len(dbxPayments) { + case 0: + return snopayouts.Payment{}, snopayouts.ErrNoDataForPeriod.Wrap(sql.ErrNoRows) + case 1: + return convertDBXPayment(dbxPayments[0]) + default: + return snopayouts.Payment{}, Error.New("impossible number of rows returned: %d", len(dbxPayments)) + } +} + +// GetAllPayments return all payments by nodeID. +func (db *snopayoutsDB) GetAllPayments(ctx context.Context, nodeID storj.NodeID) (payments []snopayouts.Payment, err error) { + dbxPayments, err := db.db.All_StoragenodePayment_By_NodeId(ctx, + dbx.StoragenodePayment_NodeId(nodeID.Bytes())) + if err != nil { + return nil, Error.Wrap(err) + } + + for _, dbxPayment := range dbxPayments { + payment, err := convertDBXPayment(dbxPayment) + if err != nil { + return nil, Error.Wrap(err) + } + payments = append(payments, payment) + } + + return payments, nil +} + +func convertDBXPayment(dbxPayment *dbx.StoragenodePayment) (snopayouts.Payment, error) { + nodeID, err := storj.NodeIDFromBytes(dbxPayment.NodeId) + if err != nil { + return snopayouts.Payment{}, Error.Wrap(err) + } + return snopayouts.Payment{ + ID: dbxPayment.Id, + Created: dbxPayment.CreatedAt, + NodeID: nodeID, + Period: dbxPayment.Period, + Amount: dbxPayment.Amount, + Receipt: derefStringOr(dbxPayment.Receipt, ""), + Notes: derefStringOr(dbxPayment.Notes, ""), + }, nil +} + +func derefStringOr(v *string, def string) string { + if v != nil { + return *v + } + return def +} + +// +// test helpers +// + +// TestCreatePaystub inserts storagenode_paystub into database. Only used for tests. 
+func (db *snopayoutsDB) TestCreatePaystub(ctx context.Context, stub snopayouts.Paystub) (err error) { + return db.db.CreateNoReturn_StoragenodePaystub(ctx, dbx.StoragenodePaystub_Period(stub.Period), - dbx.StoragenodePaystub_NodeId(stub.NodeID[:]), + dbx.StoragenodePaystub_NodeId(stub.NodeID.Bytes()), dbx.StoragenodePaystub_Codes(stub.Codes), dbx.StoragenodePaystub_UsageAtRest(stub.UsageAtRest), dbx.StoragenodePaystub_UsageGet(stub.UsageGet), @@ -137,36 +178,10 @@ func (paystubs *paymentStubs) CreatePaystub(ctx context.Context, stub snopayouts ) } -// GetPayment returns payment by nodeID and period. -func (paystubs *paymentStubs) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (payment snopayouts.StoragenodePayment, err error) { - query := `SELECT * FROM storagenode_payments WHERE node_id = $1 AND period = $2;` - - row := paystubs.db.QueryRowContext(ctx, query, nodeID, period) - err = row.Scan( - &payment.ID, - &payment.Created, - &payment.NodeID, - &payment.Period, - &payment.Amount, - &payment.Receipt, - &payment.Notes, - ) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return snopayouts.StoragenodePayment{}, snopayouts.ErrNoDataForPeriod.Wrap(err) - } - - return snopayouts.StoragenodePayment{}, Error.Wrap(err) - } - - return payment, nil -} - -// CreatePayment inserts storagenode_payment into database. -func (paystubs *paymentStubs) CreatePayment(ctx context.Context, payment snopayouts.StoragenodePayment) (err error) { - return paystubs.db.CreateNoReturn_StoragenodePayment( - ctx, - dbx.StoragenodePayment_NodeId(payment.NodeID[:]), +// TestCreatePayment inserts storagenode_payment into database. Only used for tests. 
+func (db *snopayoutsDB) TestCreatePayment(ctx context.Context, payment snopayouts.Payment) (err error) { + return db.db.CreateNoReturn_StoragenodePayment(ctx, + dbx.StoragenodePayment_NodeId(payment.NodeID.Bytes()), dbx.StoragenodePayment_Period(payment.Period), dbx.StoragenodePayment_Amount(payment.Amount), dbx.StoragenodePayment_Create_Fields{ @@ -175,39 +190,3 @@ func (paystubs *paymentStubs) CreatePayment(ctx context.Context, payment snopayo }, ) } - -// GetAllPayments return all payments by nodeID. -func (paystubs *paymentStubs) GetAllPayments(ctx context.Context, nodeID storj.NodeID) (payments []snopayouts.StoragenodePayment, err error) { - query := `SELECT * FROM storagenode_payments WHERE node_id = $1;` - - rows, err := paystubs.db.QueryContext(ctx, query, nodeID) - if err != nil { - return nil, Error.Wrap(err) - } - - defer func() { - err = errs.Combine(err, Error.Wrap(rows.Close())) - }() - - for rows.Next() { - payment := snopayouts.StoragenodePayment{} - - err = rows.Scan( - &payment.ID, - &payment.Created, - &payment.NodeID, - &payment.Period, - &payment.Amount, - &payment.Receipt, - &payment.Notes, - ) - - if err = rows.Err(); err != nil { - return nil, Error.Wrap(err) - } - - payments = append(payments, payment) - } - - return payments, Error.Wrap(rows.Err()) -} diff --git a/satellite/snopayouts/endpoint.go b/satellite/snopayouts/endpoint.go index e780ad0db..3cecddebd 100644 --- a/satellite/snopayouts/endpoint.go +++ b/satellite/snopayouts/endpoint.go @@ -47,49 +47,23 @@ func (e *Endpoint) GetPayStub(ctx context.Context, req *pb.GetHeldAmountRequest) peer, err := identity.PeerIdentityFromContext(ctx) if err != nil { - return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err) } + node, err := e.overlay.Get(ctx, peer.ID) if err != nil { - return nil, rpcstatus.Error(rpcstatus.Internal, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } - period := 
req.Period.String()[0:7] - stub, err := e.service.GetPayStub(ctx, node.Id, period) + paystub, err := e.service.GetPaystub(ctx, node.Id, req.Period.Format("2006-01")) if err != nil { if ErrNoDataForPeriod.Has(err) { - return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err) } - return nil, Error.Wrap(err) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } - periodTime, err := date.PeriodToTime(stub.Period) - if err != nil { - return nil, err - } - return &pb.GetHeldAmountResponse{ - Period: periodTime, - NodeId: stub.NodeID, - CreatedAt: stub.Created, - Codes: stub.Codes, - UsageAtRest: stub.UsageAtRest, - UsageGet: stub.UsageGet, - UsagePut: stub.UsagePut, - UsageGetRepair: stub.UsageGetRepair, - UsagePutRepair: stub.UsagePutRepair, - UsageGetAudit: stub.UsageGetAudit, - CompAtRest: stub.CompAtRest, - CompGet: stub.CompGet, - CompPut: stub.CompPut, - CompGetRepair: stub.CompGetRepair, - CompPutRepair: stub.CompPutRepair, - CompGetAudit: stub.CompGetAudit, - SurgePercent: stub.SurgePercent, - Held: stub.Held, - Owed: stub.Owed, - Disposed: stub.Disposed, - Paid: stub.Paid, - }, nil + return convertPaystub(paystub) } // GetAllPaystubs sends all paystubs for client node. 
@@ -98,65 +72,65 @@ func (e *Endpoint) GetAllPaystubs(ctx context.Context, req *pb.GetAllPaystubsReq peer, err := identity.PeerIdentityFromContext(ctx) if err != nil { - return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err) } + node, err := e.overlay.Get(ctx, peer.ID) if err != nil { if overlay.ErrNodeNotFound.Has(err) { return &pb.GetAllPaystubsResponse{}, nil } - - return nil, rpcstatus.Error(rpcstatus.Internal, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } - stubs, err := e.service.GetAllPaystubs(ctx, node.Id) + paystubs, err := e.service.GetAllPaystubs(ctx, node.Id) if err != nil { if ErrNoDataForPeriod.Has(err) { - return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err) } return nil, Error.Wrap(err) } - var paystubs []*pb.GetHeldAmountResponse - - response := pb.GetAllPaystubsResponse{ - Paystub: paystubs, - } - - for i := 0; i < len(stubs); i++ { - period, err := date.PeriodToTime(stubs[i].Period) + response := &pb.GetAllPaystubsResponse{} + for _, paystub := range paystubs { + pbPaystub, err := convertPaystub(paystub) if err != nil { - return nil, Error.Wrap(err) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } + response.Paystub = append(response.Paystub, pbPaystub) + } + return response, nil +} - heldAmountResponse := pb.GetHeldAmountResponse{ - Period: period, - NodeId: stubs[i].NodeID, - CreatedAt: stubs[i].Created, - Codes: stubs[i].Codes, - UsageAtRest: stubs[i].UsageAtRest, - UsageGet: stubs[i].UsageGet, - UsagePut: stubs[i].UsagePut, - UsageGetRepair: stubs[i].UsageGetRepair, - UsagePutRepair: stubs[i].UsagePutRepair, - UsageGetAudit: stubs[i].UsageGetAudit, - CompAtRest: stubs[i].CompAtRest, - CompGet: stubs[i].CompGet, - CompPut: stubs[i].CompPut, - CompGetRepair: stubs[i].CompGetRepair, - CompPutRepair: stubs[i].CompPutRepair, - CompGetAudit: stubs[i].CompGetAudit, - SurgePercent: 
stubs[i].SurgePercent, - Held: stubs[i].Held, - Owed: stubs[i].Owed, - Disposed: stubs[i].Disposed, - Paid: stubs[i].Paid, - } - - response.Paystub = append(response.Paystub, &heldAmountResponse) +func convertPaystub(paystub Paystub) (*pb.GetHeldAmountResponse, error) { + period, err := date.PeriodToTime(paystub.Period) + if err != nil { + return nil, rpcstatus.Wrap(rpcstatus.Internal, Error.Wrap(err)) } - return &response, nil + return &pb.GetHeldAmountResponse{ + Period: period, + NodeId: paystub.NodeID, + CreatedAt: paystub.Created, + Codes: paystub.Codes, + UsageAtRest: paystub.UsageAtRest, + UsageGet: paystub.UsageGet, + UsagePut: paystub.UsagePut, + UsageGetRepair: paystub.UsageGetRepair, + UsagePutRepair: paystub.UsagePutRepair, + UsageGetAudit: paystub.UsageGetAudit, + CompAtRest: paystub.CompAtRest, + CompGet: paystub.CompGet, + CompPut: paystub.CompPut, + CompGetRepair: paystub.CompGetRepair, + CompPutRepair: paystub.CompPutRepair, + CompGetAudit: paystub.CompGetAudit, + SurgePercent: paystub.SurgePercent, + Held: paystub.Held, + Owed: paystub.Owed, + Disposed: paystub.Disposed, + Paid: paystub.Paid, + }, err } // GetPayment sends node payment data for client node. 
@@ -165,35 +139,23 @@ func (e *Endpoint) GetPayment(ctx context.Context, req *pb.GetPaymentRequest) (_ peer, err := identity.PeerIdentityFromContext(ctx) if err != nil { - return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err) } + node, err := e.overlay.Get(ctx, peer.ID) if err != nil { - return nil, rpcstatus.Error(rpcstatus.Internal, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } payment, err := e.service.GetPayment(ctx, node.Id, req.Period.String()) if err != nil { if ErrNoDataForPeriod.Has(err) { - return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err) } return nil, Error.Wrap(err) } - timePeriod, err := date.PeriodToTime(payment.Period) - if err != nil { - return nil, Error.Wrap(err) - } - - return &pb.GetPaymentResponse{ - NodeId: payment.NodeID, - CreatedAt: payment.Created, - Period: timePeriod, - Amount: payment.Amount, - Receipt: payment.Receipt, - Notes: payment.Notes, - Id: payment.ID, - }, nil + return convertPayment(payment) } // GetAllPayments sends all payments to node. 
@@ -202,49 +164,49 @@ func (e *Endpoint) GetAllPayments(ctx context.Context, req *pb.GetAllPaymentsReq peer, err := identity.PeerIdentityFromContext(ctx) if err != nil { - return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err) } + node, err := e.overlay.Get(ctx, peer.ID) if err != nil { if overlay.ErrNodeNotFound.Has(err) { return &pb.GetAllPaymentsResponse{}, nil } - - return nil, rpcstatus.Error(rpcstatus.Internal, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } - allPayments, err := e.service.GetAllPayments(ctx, node.Id) + payments, err := e.service.GetAllPayments(ctx, node.Id) if err != nil { if ErrNoDataForPeriod.Has(err) { - return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error()) + return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err) } return nil, Error.Wrap(err) } - var payments []*pb.GetPaymentResponse - - response := pb.GetAllPaymentsResponse{ - Payment: payments, - } - - for i := 0; i < len(allPayments); i++ { - period, err := date.PeriodToTime(allPayments[i].Period) + response := &pb.GetAllPaymentsResponse{} + for _, payment := range payments { + pbPayment, err := convertPayment(payment) if err != nil { - return nil, Error.Wrap(err) + return nil, rpcstatus.Wrap(rpcstatus.Internal, err) } + response.Payment = append(response.Payment, pbPayment) + } + return response, nil +} - paymentResponse := pb.GetPaymentResponse{ - NodeId: allPayments[i].NodeID, - CreatedAt: allPayments[i].Created, - Period: period, - Amount: allPayments[i].Amount, - Receipt: allPayments[i].Receipt, - Notes: allPayments[i].Notes, - Id: allPayments[i].ID, - } - - response.Payment = append(response.Payment, &paymentResponse) +func convertPayment(payment Payment) (*pb.GetPaymentResponse, error) { + period, err := date.PeriodToTime(payment.Period) + if err != nil { + return nil, rpcstatus.Wrap(rpcstatus.Internal, Error.Wrap(err)) } - return &response, nil + return &pb.GetPaymentResponse{ 
+ Id: payment.ID, + CreatedAt: payment.Created, + NodeId: payment.NodeID, + Period: period, + Amount: payment.Amount, + Receipt: payment.Receipt, + Notes: payment.Notes, + }, nil } diff --git a/satellite/snopayouts/payouts.go b/satellite/snopayouts/payouts.go index 47a83ef85..f98ee6c0d 100644 --- a/satellite/snopayouts/payouts.go +++ b/satellite/snopayouts/payouts.go @@ -18,17 +18,19 @@ import ( // architecture: Service type DB interface { // GetPaystub return payStub by nodeID and period. - GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (PayStub, error) + GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (Paystub, error) // GetAllPaystubs return all payStubs by nodeID. - GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]PayStub, error) - // CreatePaystub insert paystub into db. - CreatePaystub(ctx context.Context, stub PayStub) (err error) + GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]Paystub, error) + // GetPayment return storagenode payment by nodeID and period. - GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (StoragenodePayment, error) - // CreatePayment insert payment into db. - CreatePayment(ctx context.Context, payment StoragenodePayment) (err error) + GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (Payment, error) // GetAllPayments return all payments by nodeID. - GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]StoragenodePayment, error) + GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]Payment, error) + + // TestCreatePaystub insert paystub into db. Only used for tests. + TestCreatePaystub(ctx context.Context, stub Paystub) (err error) + // TestCreatePayment insert payment into db. Only used for tests. + TestCreatePayment(ctx context.Context, payment Payment) (err error) } // ErrNoDataForPeriod represents errors from the payouts database. 
@@ -37,8 +39,8 @@ var ErrNoDataForPeriod = errs.Class("no payStub/payments for period error") // Error is the default error class for payouts package. var Error = errs.Class("payouts db error") -// PayStub is an entity that holds held amount of cash that will be paid to storagenode operator after some period. -type PayStub struct { +// Paystub is an entity that holds held amount of cash that will be paid to storagenode operator after some period. +type Paystub struct { Period string `json:"period"` NodeID storj.NodeID `json:"nodeId"` Created time.Time `json:"created"` @@ -62,8 +64,8 @@ type PayStub struct { Paid int64 `json:"paid"` } -// StoragenodePayment is an entity that holds payment to storagenode operator parameters. -type StoragenodePayment struct { +// Payment is an entity that holds payment to storagenode operator parameters. +type Payment struct { ID int64 `json:"id"` Created time.Time `json:"created"` NodeID storj.NodeID `json:"nodeId"` @@ -89,38 +91,38 @@ func NewService(log *zap.Logger, db DB) *Service { } } -// GetPayStub returns PayStub by nodeID and period. -func (service *Service) GetPayStub(ctx context.Context, nodeID storj.NodeID, period string) (PayStub, error) { - payStub, err := service.db.GetPaystub(ctx, nodeID, period) +// GetPaystub returns Paystub by nodeID and period. +func (service *Service) GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (Paystub, error) { + paystub, err := service.db.GetPaystub(ctx, nodeID, period) if err != nil { - return PayStub{}, Error.Wrap(err) + return Paystub{}, Error.Wrap(err) } - return payStub, nil + return paystub, nil } // GetAllPaystubs returns all paystubs by nodeID. 
-func (service *Service) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]PayStub, error) { - payStubs, err := service.db.GetAllPaystubs(ctx, nodeID) +func (service *Service) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]Paystub, error) { + paystubs, err := service.db.GetAllPaystubs(ctx, nodeID) if err != nil { - return []PayStub{}, Error.Wrap(err) + return []Paystub{}, Error.Wrap(err) } - return payStubs, nil + return paystubs, nil } // GetPayment returns storagenode payment data by nodeID and period. -func (service *Service) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (StoragenodePayment, error) { +func (service *Service) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (Payment, error) { payment, err := service.db.GetPayment(ctx, nodeID, period) if err != nil { - return StoragenodePayment{}, Error.Wrap(err) + return Payment{}, Error.Wrap(err) } return payment, nil } // GetAllPayments returns all payments by nodeID. -func (service *Service) GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]StoragenodePayment, error) { +func (service *Service) GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]Payment, error) { payments, err := service.db.GetAllPayments(ctx, nodeID) if err != nil { return nil, Error.Wrap(err) diff --git a/satellite/snopayouts/payouts_test.go b/satellite/snopayouts/payouts_test.go index 6ab19aa13..a3cc7e919 100644 --- a/satellite/snopayouts/payouts_test.go +++ b/satellite/snopayouts/payouts_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "storj.io/common/storj" "storj.io/common/testcontext" @@ -19,13 +19,12 @@ import ( func TestPayoutDB(t *testing.T) { satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) { - snoPayout := db.SnoPayout() + snoPayoutDB := db.SNOPayouts() NodeID := storj.NodeID{} - period := "2020-01" - paystub := snopayouts.PayStub{ + + 
paystub := snopayouts.Paystub{ Period: "2020-01", NodeID: NodeID, - Created: time.Now().UTC(), Codes: "1", UsageAtRest: 1, UsageGet: 2, @@ -46,10 +45,9 @@ func TestPayoutDB(t *testing.T) { Paid: 17, } - paystub2 := snopayouts.PayStub{ + paystub2 := snopayouts.Paystub{ Period: "2020-02", NodeID: NodeID, - Created: time.Now().UTC(), Codes: "2", UsageAtRest: 4, UsageGet: 5, @@ -70,10 +68,9 @@ func TestPayoutDB(t *testing.T) { Paid: 20, } - paystub3 := snopayouts.PayStub{ + paystub3 := snopayouts.Paystub{ Period: "2020-03", NodeID: NodeID, - Created: time.Now().UTC(), Codes: "33", UsageAtRest: 10, UsageGet: 11, @@ -94,122 +91,44 @@ func TestPayoutDB(t *testing.T) { Paid: 26, } - t.Run("Test StorePayStub", func(t *testing.T) { - err := snoPayout.CreatePaystub(ctx, paystub) - assert.NoError(t, err) - err = snoPayout.CreatePaystub(ctx, paystub2) - assert.NoError(t, err) - err = snoPayout.CreatePaystub(ctx, paystub3) - assert.NoError(t, err) - }) + { + err := snoPayoutDB.TestCreatePaystub(ctx, paystub) + require.NoError(t, err) - t.Run("Test GetPayStub", func(t *testing.T) { - stub, err := snoPayout.GetPaystub(ctx, NodeID, period) - assert.NoError(t, err) - assert.Equal(t, stub.Period, paystub.Period) - assert.Equal(t, stub.Codes, paystub.Codes) - assert.Equal(t, stub.CompAtRest, paystub.CompAtRest) - assert.Equal(t, stub.CompGet, paystub.CompGet) - assert.Equal(t, stub.CompGetAudit, paystub.CompGetAudit) - assert.Equal(t, stub.CompGetRepair, paystub.CompGetRepair) - assert.Equal(t, stub.CompPut, paystub.CompPut) - assert.Equal(t, stub.CompPutRepair, paystub.CompPutRepair) - assert.Equal(t, stub.Disposed, paystub.Disposed) - assert.Equal(t, stub.Held, paystub.Held) - assert.Equal(t, stub.Owed, paystub.Owed) - assert.Equal(t, stub.Paid, paystub.Paid) - assert.Equal(t, stub.NodeID, paystub.NodeID) - assert.Equal(t, stub.SurgePercent, paystub.SurgePercent) - assert.Equal(t, stub.UsageAtRest, paystub.UsageAtRest) - assert.Equal(t, stub.UsageGet, paystub.UsageGet) - 
assert.Equal(t, stub.UsageGetAudit, paystub.UsageGetAudit) - assert.Equal(t, stub.UsageGetRepair, paystub.UsageGetRepair) - assert.Equal(t, stub.UsagePut, paystub.UsagePut) - assert.Equal(t, stub.UsagePutRepair, paystub.UsagePutRepair) + err = snoPayoutDB.TestCreatePaystub(ctx, paystub2) + require.NoError(t, err) - stub, err = snoPayout.GetPaystub(ctx, NodeID, "") - assert.Error(t, err) + err = snoPayoutDB.TestCreatePaystub(ctx, paystub3) + require.NoError(t, err) + } - stub, err = snoPayout.GetPaystub(ctx, storj.NodeID{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, period) - assert.Error(t, err) - }) + { + actual, err := snoPayoutDB.GetPaystub(ctx, NodeID, "2020-01") + require.NoError(t, err) + actual.Created = time.Time{} // created is chosen by the database layer + require.Equal(t, paystub, actual) - t.Run("Test GetAllPaystubs", func(t *testing.T) { - stubs, err := snoPayout.GetAllPaystubs(ctx, NodeID) - assert.NoError(t, err) - for i := 0; i < len(stubs); i++ { - if stubs[i].Period == "2020-01" { - assert.Equal(t, stubs[i].Period, paystub.Period) - assert.Equal(t, stubs[i].Codes, paystub.Codes) - assert.Equal(t, stubs[i].CompAtRest, paystub.CompAtRest) - assert.Equal(t, stubs[i].CompGet, paystub.CompGet) - assert.Equal(t, stubs[i].CompGetAudit, paystub.CompGetAudit) - assert.Equal(t, stubs[i].CompGetRepair, paystub.CompGetRepair) - assert.Equal(t, stubs[i].CompPut, paystub.CompPut) - assert.Equal(t, stubs[i].CompPutRepair, paystub.CompPutRepair) - assert.Equal(t, stubs[i].Disposed, paystub.Disposed) - assert.Equal(t, stubs[i].Held, paystub.Held) - assert.Equal(t, stubs[i].Owed, paystub.Owed) - assert.Equal(t, stubs[i].Paid, paystub.Paid) - assert.Equal(t, stubs[i].NodeID, paystub.NodeID) - assert.Equal(t, stubs[i].SurgePercent, paystub.SurgePercent) - assert.Equal(t, stubs[i].UsageAtRest, paystub.UsageAtRest) - assert.Equal(t, stubs[i].UsageGet, paystub.UsageGet) - assert.Equal(t, stubs[i].UsageGetAudit, 
paystub.UsageGetAudit) - assert.Equal(t, stubs[i].UsageGetRepair, paystub.UsageGetRepair) - assert.Equal(t, stubs[i].UsagePut, paystub.UsagePut) - assert.Equal(t, stubs[i].UsagePutRepair, paystub.UsagePutRepair) - } - if stubs[i].Period == "2020-02" { - assert.Equal(t, stubs[i].Period, paystub2.Period) - assert.Equal(t, stubs[i].Codes, paystub2.Codes) - assert.Equal(t, stubs[i].CompAtRest, paystub2.CompAtRest) - assert.Equal(t, stubs[i].CompGet, paystub2.CompGet) - assert.Equal(t, stubs[i].CompGetAudit, paystub2.CompGetAudit) - assert.Equal(t, stubs[i].CompGetRepair, paystub2.CompGetRepair) - assert.Equal(t, stubs[i].CompPut, paystub2.CompPut) - assert.Equal(t, stubs[i].CompPutRepair, paystub2.CompPutRepair) - assert.Equal(t, stubs[i].Disposed, paystub2.Disposed) - assert.Equal(t, stubs[i].Held, paystub2.Held) - assert.Equal(t, stubs[i].Owed, paystub2.Owed) - assert.Equal(t, stubs[i].Paid, paystub2.Paid) - assert.Equal(t, stubs[i].NodeID, paystub2.NodeID) - assert.Equal(t, stubs[i].SurgePercent, paystub2.SurgePercent) - assert.Equal(t, stubs[i].UsageAtRest, paystub2.UsageAtRest) - assert.Equal(t, stubs[i].UsageGet, paystub2.UsageGet) - assert.Equal(t, stubs[i].UsageGetAudit, paystub2.UsageGetAudit) - assert.Equal(t, stubs[i].UsageGetRepair, paystub2.UsageGetRepair) - assert.Equal(t, stubs[i].UsagePut, paystub2.UsagePut) - assert.Equal(t, stubs[i].UsagePutRepair, paystub2.UsagePutRepair) - } - if stubs[i].Period == "2020-03" { - assert.Equal(t, stubs[i].Period, paystub3.Period) - assert.Equal(t, stubs[i].Codes, paystub3.Codes) - assert.Equal(t, stubs[i].CompAtRest, paystub3.CompAtRest) - assert.Equal(t, stubs[i].CompGet, paystub3.CompGet) - assert.Equal(t, stubs[i].CompGetAudit, paystub3.CompGetAudit) - assert.Equal(t, stubs[i].CompGetRepair, paystub3.CompGetRepair) - assert.Equal(t, stubs[i].CompPut, paystub3.CompPut) - assert.Equal(t, stubs[i].CompPutRepair, paystub3.CompPutRepair) - assert.Equal(t, stubs[i].Disposed, paystub3.Disposed) - assert.Equal(t, 
stubs[i].Held, paystub3.Held) - assert.Equal(t, stubs[i].Owed, paystub3.Owed) - assert.Equal(t, stubs[i].Paid, paystub3.Paid) - assert.Equal(t, stubs[i].NodeID, paystub3.NodeID) - assert.Equal(t, stubs[i].SurgePercent, paystub3.SurgePercent) - assert.Equal(t, stubs[i].UsageAtRest, paystub3.UsageAtRest) - assert.Equal(t, stubs[i].UsageGet, paystub3.UsageGet) - assert.Equal(t, stubs[i].UsageGetAudit, paystub3.UsageGetAudit) - assert.Equal(t, stubs[i].UsageGetRepair, paystub3.UsageGetRepair) - assert.Equal(t, stubs[i].UsagePut, paystub3.UsagePut) - assert.Equal(t, stubs[i].UsagePutRepair, paystub3.UsagePutRepair) - } + _, err = snoPayoutDB.GetPaystub(ctx, NodeID, "") + require.Error(t, err) + + _, err = snoPayoutDB.GetPaystub(ctx, testrand.NodeID(), "2020-01") + require.Error(t, err) + } + + { + stubs, err := snoPayoutDB.GetAllPaystubs(ctx, NodeID) + require.NoError(t, err) + for _, actual := range stubs { + actual.Created = time.Time{} // created is chosen by the database layer + require.Equal(t, actual, map[string]snopayouts.Paystub{ + "2020-01": paystub, + "2020-02": paystub2, + "2020-03": paystub3, + }[actual.Period]) } - }) + } - payment := snopayouts.StoragenodePayment{ - ID: 1, - Created: time.Now().UTC(), + payment := snopayouts.Payment{ NodeID: NodeID, Period: "2020-01", Amount: 123, @@ -217,25 +136,23 @@ func TestPayoutDB(t *testing.T) { Notes: "notes", } - t.Run("Test StorePayment", func(t *testing.T) { - err := snoPayout.CreatePayment(ctx, payment) - assert.NoError(t, err) - }) + { + err := snoPayoutDB.TestCreatePayment(ctx, payment) + require.NoError(t, err) + } - t.Run("Test GetPayment", func(t *testing.T) { - paym, err := snoPayout.GetPayment(ctx, NodeID, period) - assert.NoError(t, err) - assert.Equal(t, paym.NodeID, payment.NodeID) - assert.Equal(t, paym.Period, payment.Period) - assert.Equal(t, paym.Amount, payment.Amount) - assert.Equal(t, paym.Notes, payment.Notes) - assert.Equal(t, paym.Receipt, payment.Receipt) + { + actual, err := 
snoPayoutDB.GetPayment(ctx, NodeID, "2020-01") + require.NoError(t, err) + actual.Created = time.Time{} // created is chosen by the database layer + actual.ID = 0 // id is chosen by the database layer + require.Equal(t, payment, actual) - paym, err = snoPayout.GetPayment(ctx, NodeID, "") - assert.Error(t, err) + _, err = snoPayoutDB.GetPayment(ctx, NodeID, "") + require.Error(t, err) - paym, err = snoPayout.GetPayment(ctx, testrand.NodeID(), period) - assert.Error(t, err) - }) + _, err = snoPayoutDB.GetPayment(ctx, testrand.NodeID(), "2020-01") + require.Error(t, err) + } }) } From 824fd6f131c16b405ba232e439aea57906a389a2 Mon Sep 17 00:00:00 2001 From: Yingrong Zhao Date: Wed, 27 Jan 2021 15:17:19 -0500 Subject: [PATCH 26/38] pkg/quic: add backward compatibility for qtls Change-Id: I2560074ece5b61d9ddb236269172325e1b5de83e --- pkg/quic/conn.go | 3 ++- pkg/quic/qtls/go114.go | 29 +++++++++++++++++++++++++++++ pkg/quic/qtls/go115.go | 18 ++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 pkg/quic/qtls/go114.go create mode 100644 pkg/quic/qtls/go115.go diff --git a/pkg/quic/conn.go b/pkg/quic/conn.go index d884a7ffd..60c43b11e 100644 --- a/pkg/quic/conn.go +++ b/pkg/quic/conn.go @@ -15,6 +15,7 @@ import ( "storj.io/common/memory" "storj.io/common/rpc" + "storj.io/storj/pkg/quic/qtls" ) // Conn is a wrapper around a quic connection and fulfills net.Conn interface. @@ -71,7 +72,7 @@ func (c *Conn) getStream() (quic.Stream, error) { // ConnectionState converts quic session state to tls connection state and returns tls state. func (c *Conn) ConnectionState() tls.ConnectionState { - return c.session.ConnectionState().ConnectionState + return qtls.ToTLSConnectionState(c.session.ConnectionState()) } // Close closes the quic connection. 
diff --git a/pkg/quic/qtls/go114.go b/pkg/quic/qtls/go114.go
new file mode 100644
index 000000000..51257dc90
--- /dev/null
+++ b/pkg/quic/qtls/go114.go
@@ -0,0 +1,29 @@
+// Copyright (C) 2021 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+// +build go1.13 go1.14
+
+package qtls
+
+import (
+	"crypto/tls"
+
+	quicgo "github.com/lucas-clemente/quic-go"
+)
+
+// ToTLSConnectionState converts a quic-go connection state to tls connection
+// state.
+func ToTLSConnectionState(state quicgo.ConnectionState) tls.ConnectionState {
+	return tls.ConnectionState{
+		Version: state.Version,
+		HandshakeComplete: state.HandshakeComplete,
+		DidResume: state.DidResume,
+		CipherSuite: state.CipherSuite,
+		NegotiatedProtocol: state.NegotiatedProtocol,
+		ServerName: state.ServerName,
+		PeerCertificates: state.PeerCertificates,
+		VerifiedChains: state.VerifiedChains,
+		SignedCertificateTimestamps: state.SignedCertificateTimestamps,
+		OCSPResponse: state.OCSPResponse,
+	}
+}
diff --git a/pkg/quic/qtls/go115.go b/pkg/quic/qtls/go115.go
new file mode 100644
index 000000000..ad46a783a
--- /dev/null
+++ b/pkg/quic/qtls/go115.go
@@ -0,0 +1,18 @@
+// Copyright (C) 2021 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+// +build !go1.13 !go1.14
+
+package qtls
+
+import (
+	"crypto/tls"
+
+	quicgo "github.com/lucas-clemente/quic-go"
+)
+
+// ToTLSConnectionState converts a quic-go connection state to tls connection
+// state.
+func ToTLSConnectionState(state quicgo.ConnectionState) tls.ConnectionState {
+	return state.ConnectionState
+}
From 4d32bdaefbacee8197c2e8a8b4ff91313f567d3c Mon Sep 17 00:00:00 2001
From: Kaloyan Raev
Date: Wed, 27 Jan 2021 22:40:47 +0200
Subject: [PATCH 27/38] satellite/satellitedb: drop
 bucket_metainfos_name_project_id_key index

This index is obsolete and duplicates a similar (project_id, name)
index on the same table.

Moreover, it might confuse CockroachDB as to which of the two indexes
to use, which might affect DB performance.
Change-Id: If8d1df8347714942cea9dca82864ba5f4973bed3 --- satellite/satellitedb/dbx/satellitedb.dbx | 1 - satellite/satellitedb/dbx/satellitedb.dbx.go | 2 - .../satellitedb/dbx/satellitedb.dbx.pgx.sql | 1 - .../dbx/satellitedb.dbx.pgxcockroach.sql | 1 - satellite/satellitedb/migrate.go | 24 + .../satellitedb/testdata/postgres.v141.sql | 576 ++++++++++++++++++ 6 files changed, 600 insertions(+), 5 deletions(-) create mode 100644 satellite/satellitedb/testdata/postgres.v141.sql diff --git a/satellite/satellitedb/dbx/satellitedb.dbx b/satellite/satellitedb/dbx/satellitedb.dbx index 5c4470c6c..6ec32ad88 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx +++ b/satellite/satellitedb/dbx/satellitedb.dbx @@ -981,7 +981,6 @@ create user_credit () model bucket_metainfo ( key id - unique name project_id //to remove later unique project_id name field id blob diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index ba8e748cf..88f5d2f9a 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -727,7 +727,6 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, PRIMARY KEY ( id ), - UNIQUE ( name, project_id ), UNIQUE ( project_id, name ) ); CREATE TABLE project_members ( @@ -1284,7 +1283,6 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, PRIMARY KEY ( id ), - UNIQUE ( name, project_id ), UNIQUE ( project_id, name ) ); CREATE TABLE project_members ( diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql index bcf7ff18d..75c6737bd 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql @@ -407,7 +407,6 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT 
NULL, default_redundancy_total_shares integer NOT NULL, PRIMARY KEY ( id ), - UNIQUE ( name, project_id ), UNIQUE ( project_id, name ) ); CREATE TABLE project_members ( diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql index bcf7ff18d..75c6737bd 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql @@ -407,7 +407,6 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, PRIMARY KEY ( id ), - UNIQUE ( name, project_id ), UNIQUE ( project_id, name ) ); CREATE TABLE project_members ( diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index 741a2bd80..720db04fe 100644 --- a/satellite/satellitedb/migrate.go +++ b/satellite/satellitedb/migrate.go @@ -1188,6 +1188,30 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration { `ALTER TABLE users ADD COLUMN is_professional boolean NOT NULL DEFAULT false;`, }, }, + { + DB: &db.migrationDB, + Description: "drop the obsolete (name, project_id) index from bucket_metainfos table.", + Version: 141, + Action: migrate.Func(func(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error { + if _, ok := db.Driver().(*cockroachutil.Driver); ok { + _, err := db.Exec(ctx, + `DROP INDEX bucket_metainfos_name_project_id_key CASCADE;`, + ) + if err != nil { + return ErrMigrate.Wrap(err) + } + return nil + } + + _, err := db.Exec(ctx, + `ALTER TABLE bucket_metainfos DROP CONSTRAINT bucket_metainfos_name_project_id_key;`, + ) + if err != nil { + return ErrMigrate.Wrap(err) + } + return nil + }), + }, }, } } diff --git a/satellite/satellitedb/testdata/postgres.v141.sql b/satellite/satellitedb/testdata/postgres.v141.sql new file mode 100644 index 000000000..af205961b --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v141.sql @@ -0,0 +1,576 @@ 
+-- AUTOGENERATED BY storj.io/dbx +-- DO NOT EDIT +CREATE TABLE accounting_rollups ( + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + PRIMARY KEY ( node_id, start_time ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE TABLE audit_histories ( + node_id bytea NOT NULL, + history bytea NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + inline bigint NOT NULL, + remote bigint NOT NULL, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE coinpayments_transactions ( + id text NOT NULL, + user_id bytea NOT NULL, + address text NOT NULL, + amount bytea NOT NULL, + received bytea NOT NULL, + status integer NOT NULL, + key text NOT NULL, + timeout integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE consumed_serials ( + storage_node_id bytea NOT NULL, + serial_number bytea NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( storage_node_id, serial_number ) +); +CREATE TABLE coupons ( + id bytea NOT NULL, + 
user_id bytea NOT NULL, + amount bigint NOT NULL, + description text NOT NULL, + type integer NOT NULL, + status integer NOT NULL, + duration bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE coupon_usages ( + coupon_id bytea NOT NULL, + amount bigint NOT NULL, + status integer NOT NULL, + period timestamp with time zone NOT NULL, + PRIMARY KEY ( coupon_id, period ) +); +CREATE TABLE graceful_exit_progress ( + node_id bytea NOT NULL, + bytes_transferred bigint NOT NULL, + pieces_transferred bigint NOT NULL DEFAULT 0, + pieces_failed bigint NOT NULL DEFAULT 0, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE graceful_exit_transfer_queue ( + node_id bytea NOT NULL, + path bytea NOT NULL, + piece_num integer NOT NULL, + root_piece_id bytea, + durability_ratio double precision NOT NULL, + queued_at timestamp with time zone NOT NULL, + requested_at timestamp with time zone, + last_failed_at timestamp with time zone, + last_failed_code integer, + failed_count integer, + finished_at timestamp with time zone, + order_limit_send_count integer NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, path, piece_num ) +); +CREATE TABLE injuredsegments ( + path bytea NOT NULL, + data bytea NOT NULL, + attempted timestamp with time zone, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + segment_health double precision NOT NULL DEFAULT 1, + PRIMARY KEY ( path ) +); +CREATE TABLE irreparabledbs ( + segmentpath bytea NOT NULL, + segmentdetail bytea NOT NULL, + pieces_lost_count bigint NOT NULL, + seg_damaged_unix_sec bigint NOT NULL, + repair_attempt_count bigint NOT NULL, + PRIMARY KEY ( segmentpath ) +); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL DEFAULT '', + last_net text NOT NULL, + last_ip_port text, + protocol integer NOT NULL DEFAULT 0, + type integer NOT NULL DEFAULT 0, + email text NOT NULL, + wallet text NOT NULL, + free_disk bigint NOT 
NULL DEFAULT -1, + piece_count bigint NOT NULL DEFAULT 0, + major bigint NOT NULL DEFAULT 0, + minor bigint NOT NULL DEFAULT 0, + patch bigint NOT NULL DEFAULT 0, + hash text NOT NULL DEFAULT '', + timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00', + release boolean NOT NULL DEFAULT false, + latency_90 bigint NOT NULL DEFAULT 0, + audit_success_count bigint NOT NULL DEFAULT 0, + total_audit_count bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + uptime_success_count bigint NOT NULL DEFAULT 0, + total_uptime_count bigint NOT NULL DEFAULT 0, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch', + last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch', + contained boolean NOT NULL DEFAULT false, + disqualified timestamp with time zone, + suspended timestamp with time zone, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + online_score double precision NOT NULL DEFAULT 1, + audit_reputation_alpha double precision NOT NULL DEFAULT 1, + audit_reputation_beta double precision NOT NULL DEFAULT 0, + unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1, + unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0, + uptime_reputation_alpha double precision NOT NULL DEFAULT 1, + uptime_reputation_beta double precision NOT NULL DEFAULT 0, + exit_initiated_at timestamp with time zone, + exit_loop_completed_at timestamp with time zone, + exit_finished_at timestamp with time zone, + exit_success boolean NOT NULL DEFAULT false, + PRIMARY KEY ( id ) +); +CREATE TABLE node_api_versions ( + id bytea NOT NULL, + api_version integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( 
id ) +); +CREATE TABLE offers ( + id serial NOT NULL, + name text NOT NULL, + description text NOT NULL, + award_credit_in_cents integer NOT NULL DEFAULT 0, + invitee_credit_in_cents integer NOT NULL DEFAULT 0, + award_credit_duration_days integer, + invitee_credit_duration_days integer, + redeemable_cap integer, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + status integer NOT NULL, + type integer NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE peer_identities ( + node_id bytea NOT NULL, + leaf_serial_number bytea NOT NULL, + chain bytea NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE pending_audits ( + node_id bytea NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + path bytea NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE pending_serial_queue ( + storage_node_id bytea NOT NULL, + bucket_id bytea NOT NULL, + serial_number bytea NOT NULL, + action integer NOT NULL, + settled bigint NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( storage_node_id, bucket_id, serial_number ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + name text NOT NULL, + description text NOT NULL, + usage_limit bigint, + bandwidth_limit bigint, + rate_limit integer, + max_buckets integer, + partner_id bytea, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE project_bandwidth_rollups ( + project_id bytea NOT NULL, + interval_month date NOT NULL, + egress_allocated bigint NOT NULL, + PRIMARY KEY ( project_id, interval_month ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE reported_serials ( + 
expires_at timestamp with time zone NOT NULL, + storage_node_id bytea NOT NULL, + bucket_id bytea NOT NULL, + action integer NOT NULL, + serial_number bytea NOT NULL, + settled bigint NOT NULL, + observed_at timestamp with time zone NOT NULL, + PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE revocations ( + revoked bytea NOT NULL, + api_key_id bytea NOT NULL, + PRIMARY KEY ( revoked ) +); +CREATE TABLE serial_numbers ( + id serial NOT NULL, + serial_number bytea NOT NULL, + bucket_id bytea NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollups_phase2 ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_payments ( + id bigserial NOT NULL, + created_at timestamp with time zone NOT NULL, + node_id bytea NOT NULL, + period text NOT NULL, + amount bigint NOT NULL, + receipt text, + notes text, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_paystubs ( + period text NOT NULL, + node_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + codes text NOT NULL, + usage_at_rest double precision NOT NULL, + usage_get bigint NOT NULL, + usage_put bigint NOT NULL, + usage_get_repair bigint NOT NULL, + 
usage_put_repair bigint NOT NULL, + usage_get_audit bigint NOT NULL, + comp_at_rest bigint NOT NULL, + comp_get bigint NOT NULL, + comp_put bigint NOT NULL, + comp_get_repair bigint NOT NULL, + comp_put_repair bigint NOT NULL, + comp_get_audit bigint NOT NULL, + surge_percent bigint NOT NULL, + held bigint NOT NULL, + owed bigint NOT NULL, + disposed bigint NOT NULL, + paid bigint NOT NULL, + PRIMARY KEY ( period, node_id ) +); +CREATE TABLE storagenode_storage_tallies ( + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( interval_end_time, node_id ) +); +CREATE TABLE stripe_customers ( + user_id bytea NOT NULL, + customer_id text NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ), + UNIQUE ( customer_id ) +); +CREATE TABLE stripecoinpayments_invoice_project_records ( + id bytea NOT NULL, + project_id bytea NOT NULL, + storage double precision NOT NULL, + egress bigint NOT NULL, + objects bigint NOT NULL, + period_start timestamp with time zone NOT NULL, + period_end timestamp with time zone NOT NULL, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( project_id, period_start, period_end ) +); +CREATE TABLE stripecoinpayments_tx_conversion_rates ( + tx_id text NOT NULL, + rate bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + email text NOT NULL, + normalized_email text NOT NULL, + full_name text NOT NULL, + short_name text, + password_hash bytea NOT NULL, + status integer NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + project_limit integer NOT NULL DEFAULT 0, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, + PRIMARY KEY ( id ) +); +CREATE TABLE value_attributions ( + project_id bytea NOT NULL, 
+ bucket_name bytea NOT NULL, + partner_id bytea NOT NULL, + last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, bucket_name ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + head bytea NOT NULL, + name text NOT NULL, + secret bytea NOT NULL, + partner_id bytea, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( head ), + UNIQUE ( name, project_id ) +); +CREATE TABLE bucket_metainfos ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ), + name bytea NOT NULL, + partner_id bytea, + path_cipher integer NOT NULL, + created_at timestamp with time zone NOT NULL, + default_segment_size integer NOT NULL, + default_encryption_cipher_suite integer NOT NULL, + default_encryption_block_size integer NOT NULL, + default_redundancy_algorithm integer NOT NULL, + default_redundancy_share_size integer NOT NULL, + default_redundancy_required_shares integer NOT NULL, + default_redundancy_repair_shares integer NOT NULL, + default_redundancy_optimal_shares integer NOT NULL, + default_redundancy_total_shares integer NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( project_id, name ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE stripecoinpayments_apply_balance_intents ( + tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE used_serials ( + serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE, + storage_node_id bytea NOT NULL, + PRIMARY KEY ( serial_number_id, storage_node_id ) +); +CREATE TABLE user_credits ( + id serial NOT NULL, + 
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + offer_id integer NOT NULL REFERENCES offers( id ), + referred_by bytea REFERENCES users( id ) ON DELETE SET NULL, + type text NOT NULL, + credits_earned_in_cents integer NOT NULL, + credits_used_in_cents integer NOT NULL, + expires_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( id, offer_id ) +); +CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time ); +CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start ); +CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id ); +CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id ); +CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at ); +CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at ); +CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted ); +CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health ); +CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at ); +CREATE INDEX node_last_ip ON nodes ( last_net ); +CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success ); +CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number ); +CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at ); +CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start ); +CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period ); +CREATE INDEX 
storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id ); +CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id ); +CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id ); + +INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", 
"total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", 
"contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", 
"unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', 
'', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00'); + +INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES 
('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00'); +INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES 
(E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); + +INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14); +INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active 
free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14); + +INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00'); + +INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 
E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10); + +INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null'); + +INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00'); +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 
E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', 
'2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0); + +INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024); + +INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00'); +INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") 
VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00'); + +INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00'); + +INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00'); + +INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00'); + +INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00'); + +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO 
"injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00'); +INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00'); + +INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00'); + +INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', 
'2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a'); + +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); +INSERT INTO "node_api_versions"("id", "api_version", "created_at", 
"updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00'); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL); + +INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES 
(E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true); + +-- NEW DATA -- \ No newline at end of file From a97b5c8552155c052cd1b8240f039dd9cf6e5071 Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Thu, 28 Jan 2021 10:28:45 +0200 Subject: [PATCH 28/38] go.mod: bump quic to master, such it compiles with Go 1.16 Change-Id: I4af35cd561955094a34fe3c96b0c532f325ac505 --- go.mod | 2 +- go.sum | 7 +++++-- pkg/quic/qtls/go114.go | 22 +++++++++++----------- pkg/quic/qtls/go115.go | 4 ++-- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index f63d5d38e..0616c9616 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/jackc/pgtype v1.5.0 github.com/jackc/pgx/v4 v4.9.0 github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 - github.com/lucas-clemente/quic-go v0.19.3 + github.com/lucas-clemente/quic-go v0.7.1-0.20210125162258-7456e643b956 github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 diff --git a/go.sum b/go.sum index 15e17311f..1b3c441f3 100644 --- a/go.sum +++ b/go.sum @@ -372,8 +372,8 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq 
v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.7.1-0.20210125162258-7456e643b956 h1:/AxIsj5sEh+ELGhBilVugx+iCdD9G1Ym0O1i7kUSKzA= +github.com/lucas-clemente/quic-go v0.7.1-0.20210125162258-7456e643b956/go.mod h1:Cxx5SWK/K2dp7TA7qsnIHgF43e3NAufUDcSb9jTpL68= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -384,6 +384,8 @@ github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl5 github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.0-beta.1.1 h1:CWVWoLCcdfarQRGgWi2b9ILKhc5v8MXtfs3bz9dmE00= +github.com/marten-seemann/qtls-go1-16 v0.1.0-beta.1.1/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -763,6 +765,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201231184435-2d18734c6014/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc h1:y0Og6AYdwus7SIAnKnDxjc4gJetRiYEWOx4AKbOeyEI= golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= diff --git a/pkg/quic/qtls/go114.go b/pkg/quic/qtls/go114.go index 51257dc90..750e4fd20 100644 --- a/pkg/quic/qtls/go114.go +++ b/pkg/quic/qtls/go114.go @@ -1,7 +1,7 @@ // Copyright (C) 2021 Storj Labs, Inc. // See LICENSE for copying information. -// +build go1.13 go1.14 +// +build !go1.15 package qtls @@ -15,15 +15,15 @@ import ( // state. func ToTLSConnectionState(state quicgo.ConnectionState) tls.ConnectionState { return tls.ConnectionState{ - Version: state.Version, - HandshakeComplete: state.HandshakeComplete, - DidResume: state.DidResume, - CipherSuite: state.CipherSuite, - NegotiatedProtocol: state.NegotiatedProtocol, - ServerName: state.ServerName, - PeerCertificates: state.PeerCertificates, - VerifiedChains: state.VerifiedChains, - SignedCertificateTimestamps: state.SignedCertificateTimestamps, - OCSPResponse: state.OCSPResponse, + Version: state.TLS.Version, + HandshakeComplete: state.TLS.HandshakeComplete, + DidResume: state.TLS.DidResume, + CipherSuite: state.TLS.CipherSuite, + NegotiatedProtocol: state.TLS.NegotiatedProtocol, + ServerName: state.TLS.ServerName, + PeerCertificates: state.TLS.PeerCertificates, + VerifiedChains: state.TLS.VerifiedChains, + SignedCertificateTimestamps: state.TLS.SignedCertificateTimestamps, + OCSPResponse: state.TLS.OCSPResponse, } } diff --git a/pkg/quic/qtls/go115.go b/pkg/quic/qtls/go115.go index ad46a783a..6219457d8 100644 --- a/pkg/quic/qtls/go115.go +++ 
b/pkg/quic/qtls/go115.go @@ -1,7 +1,7 @@ // Copyright (C) 2021 Storj Labs, Inc. // See LICENSE for copying information. -// +build !go1.13 !go1.14 +// +build go1.15 package qtls @@ -14,5 +14,5 @@ import ( // ToTLSConnectionState converts a quic-go connection state to tls connection // state. func ToTLSConnectionState(state quicgo.ConnectionState) tls.ConnectionState { - return state.ConnectionState + return state.TLS.ConnectionState } From 19e3dc4ec0150544affdc7ee8a2eb9eb4596cbcf Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Thu, 28 Jan 2021 13:46:18 +0200 Subject: [PATCH 29/38] satellite/overlay: rename NodeSelectionCache to UploadSelectionCache It wasn't obvious that NodeSelectionCache was only for uploads. Change-Id: Ifeeaa6fdb50a4b7916245b48d8634d70ac54459c --- private/testplanet/satellite.go | 2 +- satellite/overlay/benchmark_test.go | 10 +++---- satellite/overlay/config.go | 2 +- satellite/overlay/selection_test.go | 14 ++++----- satellite/overlay/service.go | 18 +++++------ satellite/overlay/service_test.go | 8 ++--- ...deselectioncache.go => uploadselection.go} | 30 +++++++++---------- ...ncache_test.go => uploadselection_test.go} | 20 ++++++------- 8 files changed, 52 insertions(+), 52 deletions(-) rename satellite/overlay/{nodeselectioncache.go => uploadselection.go} (78%) rename satellite/overlay/{nodeselectioncache_test.go => uploadselection_test.go} (95%) diff --git a/private/testplanet/satellite.go b/private/testplanet/satellite.go index edb07f816..1fb05f436 100644 --- a/private/testplanet/satellite.go +++ b/private/testplanet/satellite.go @@ -431,7 +431,7 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int SuspensionGracePeriod: time.Hour, SuspensionDQEnabled: true, }, - NodeSelectionCache: overlay.CacheConfig{ + NodeSelectionCache: overlay.UploadSelectionCacheConfig{ Staleness: 3 * time.Minute, }, UpdateStatsBatchSize: 100, diff --git a/satellite/overlay/benchmark_test.go b/satellite/overlay/benchmark_test.go index 
943267cb7..9f4fc295d 100644 --- a/satellite/overlay/benchmark_test.go +++ b/satellite/overlay/benchmark_test.go @@ -442,7 +442,7 @@ func BenchmarkNodeSelection(b *testing.B) { service, err := overlay.NewService(zap.NewNop(), overlaydb, overlay.Config{ Node: nodeSelectionConfig, - NodeSelectionCache: overlay.CacheConfig{ + NodeSelectionCache: overlay.UploadSelectionCacheConfig{ Staleness: time.Hour, }, }) @@ -496,9 +496,9 @@ func BenchmarkNodeSelection(b *testing.B) { } }) - b.Run("NodeSelectionCacheGetNodes", func(b *testing.B) { + b.Run("UploadSelectionCacheGetNodes", func(b *testing.B) { for i := 0; i < b.N; i++ { - selected, err := service.SelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{ + selected, err := service.UploadSelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{ RequestedCount: SelectCount, ExcludedIDs: nil, MinimumVersion: "v1.0.0", @@ -508,9 +508,9 @@ func BenchmarkNodeSelection(b *testing.B) { } }) - b.Run("NodeSelectionCacheGetNodesExclusion", func(b *testing.B) { + b.Run("UploadSelectionCacheGetNodesExclusion", func(b *testing.B) { for i := 0; i < b.N; i++ { - selected, err := service.SelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{ + selected, err := service.UploadSelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{ RequestedCount: SelectCount, ExcludedIDs: excludedIDs, MinimumVersion: "v1.0.0", diff --git a/satellite/overlay/config.go b/satellite/overlay/config.go index 8074b2472..0c63ea09e 100644 --- a/satellite/overlay/config.go +++ b/satellite/overlay/config.go @@ -21,7 +21,7 @@ var ( // Config is a configuration for overlay service. 
type Config struct { Node NodeSelectionConfig - NodeSelectionCache CacheConfig + NodeSelectionCache UploadSelectionCacheConfig UpdateStatsBatchSize int `help:"number of update requests to process per transaction" default:"100"` AuditHistory AuditHistoryConfig } diff --git a/satellite/overlay/selection_test.go b/satellite/overlay/selection_test.go index 44a2af521..015c07491 100644 --- a/satellite/overlay/selection_test.go +++ b/satellite/overlay/selection_test.go @@ -78,7 +78,7 @@ func TestMinimumDiskSpace(t *testing.T) { n1, err := saOverlay.Service.FindStorageNodesForUpload(ctx, req) require.Error(t, err) require.True(t, overlay.ErrNotEnoughNodes.Has(err)) - n2, err := saOverlay.Service.SelectionCache.GetNodes(ctx, req) + n2, err := saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req) require.Error(t, err) require.True(t, overlay.ErrNotEnoughNodes.Has(err)) require.Equal(t, len(n2), len(n1)) @@ -104,7 +104,7 @@ func TestMinimumDiskSpace(t *testing.T) { n2, err = saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig) require.NoError(t, err) require.Equal(t, len(n1), len(n2)) - n3, err = saOverlay.Service.SelectionCache.GetNodes(ctx, req) + n3, err = saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req) require.NoError(t, err) require.Equal(t, len(n1), len(n3)) }) @@ -218,7 +218,7 @@ func TestEnsureMinimumRequested(t *testing.T) { require.Len(t, nodes, requestedCount) require.Equal(t, 0, countReputable(nodes)) - n2, err := service.SelectionCache.GetNodes(ctx, req) + n2, err := service.UploadSelectionCache.GetNodes(ctx, req) require.NoError(t, err) require.Equal(t, requestedCount, len(n2)) }) @@ -521,7 +521,7 @@ func TestFindStorageNodesDistinctNetworks(t *testing.T) { require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort) require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr) require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr) - n2, err := satellite.Overlay.Service.SelectionCache.GetNodes(ctx, req) + n2, err := 
satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req) require.NoError(t, err) require.Len(t, n2, 2) require.NotEqual(t, n2[0].LastIPPort, n2[1].LastIPPort) @@ -543,7 +543,7 @@ func TestFindStorageNodesDistinctNetworks(t *testing.T) { n1, err := satellite.Overlay.Service.FindStorageNodesWithPreferences(ctx, req, &satellite.Config.Overlay.Node) require.Error(t, err) require.Equal(t, len(n), len(n1)) - n2, err = satellite.Overlay.Service.SelectionCache.GetNodes(ctx, req) + n2, err = satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req) require.Error(t, err) require.Equal(t, len(n1), len(n2)) }) @@ -593,7 +593,7 @@ func TestSelectNewStorageNodesExcludedIPs(t *testing.T) { require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort) require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr) require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr) - n2, err := satellite.Overlay.Service.SelectionCache.GetNodes(ctx, req) + n2, err := satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req) require.NoError(t, err) require.Len(t, n2, 2) require.NotEqual(t, n2[0].LastIPPort, n2[1].LastIPPort) @@ -749,7 +749,7 @@ func TestCacheSelectionVsDBSelection(t *testing.T) { req := overlay.FindStorageNodesRequest{RequestedCount: 5} n1, err := saOverlay.Service.FindStorageNodesForUpload(ctx, req) require.NoError(t, err) - n2, err := saOverlay.Service.SelectionCache.GetNodes(ctx, req) + n2, err := saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req) require.NoError(t, err) require.Equal(t, len(n2), len(n1)) n3, err := saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig) diff --git a/satellite/overlay/service.go b/satellite/overlay/service.go index 5f2fca2f1..3821b6d13 100644 --- a/satellite/overlay/service.go +++ b/satellite/overlay/service.go @@ -262,10 +262,10 @@ func (node *SelectedNode) Clone() *SelectedNode { // // architecture: Service type Service struct { - log *zap.Logger - db DB - config Config - SelectionCache 
*NodeSelectionCache + log *zap.Logger + db DB + config Config + UploadSelectionCache *UploadSelectionCache } // NewService returns a new Service. @@ -278,7 +278,7 @@ func NewService(log *zap.Logger, db DB, config Config) (*Service, error) { log: log, db: db, config: config, - SelectionCache: NewNodeSelectionCache(log, db, + UploadSelectionCache: NewUploadSelectionCache(log, db, config.NodeSelectionCache.Staleness, config.Node, ), }, nil @@ -313,7 +313,7 @@ func (service *Service) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs // GetNodeIPs returns a map of node ip:port for the supplied nodeIDs. func (service *Service) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]string, err error) { defer mon.Task()(&ctx)(&err) - return service.SelectionCache.GetNodeIPs(ctx, nodeIDs) + return service.UploadSelectionCache.GetNodeIPs(ctx, nodeIDs) } // IsOnline checks if a node is 'online' based on the collected statistics. @@ -346,7 +346,7 @@ func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindS return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node) } - selectedNodes, err := service.SelectionCache.GetNodes(ctx, req) + selectedNodes, err := service.UploadSelectionCache.GetNodes(ctx, req) if err != nil { service.log.Warn("error selecting from node selection cache", zap.String("err", err.Error())) } @@ -544,7 +544,7 @@ func (service *Service) TestVetNode(ctx context.Context, nodeID storj.NodeID) (v service.log.Warn("error vetting node", zap.Stringer("node ID", nodeID)) return nil, err } - err = service.SelectionCache.Refresh(ctx) + err = service.UploadSelectionCache.Refresh(ctx) service.log.Warn("nodecache refresh err", zap.Error(err)) return vettedTime, err } @@ -556,7 +556,7 @@ func (service *Service) TestUnvetNode(ctx context.Context, nodeID storj.NodeID) service.log.Warn("error unvetting node", zap.Stringer("node ID", nodeID), zap.Error(err)) return err } - err = 
service.SelectionCache.Refresh(ctx) + err = service.UploadSelectionCache.Refresh(ctx) service.log.Warn("nodecache refresh err", zap.Error(err)) return err } diff --git a/satellite/overlay/service_test.go b/satellite/overlay/service_test.go index d3fc2eb0c..7fec0004f 100644 --- a/satellite/overlay/service_test.go +++ b/satellite/overlay/service_test.go @@ -274,7 +274,7 @@ func TestRandomizedSelectionCache(t *testing.T) { }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) { satellite := planet.Satellites[0] overlaydb := satellite.Overlay.DB - nodeSelectionCache := satellite.Overlay.Service.SelectionCache + uploadSelectionCache := satellite.Overlay.Service.UploadSelectionCache allIDs := make(storj.NodeIDList, totalNodes) nodeCounts := make(map[storj.NodeID]int) expectedNewCount := int(float64(totalNodes) * satellite.Config.Overlay.Node.NewNodeFraction) @@ -324,9 +324,9 @@ func TestRandomizedSelectionCache(t *testing.T) { nodeCounts[newID] = 0 } - err := nodeSelectionCache.Refresh(ctx) + err := uploadSelectionCache.Refresh(ctx) require.NoError(t, err) - reputable, new := nodeSelectionCache.Size() + reputable, new := uploadSelectionCache.Size() require.Equal(t, totalNodes-expectedNewCount, reputable) require.Equal(t, expectedNewCount, new) @@ -338,7 +338,7 @@ func TestRandomizedSelectionCache(t *testing.T) { RequestedCount: numNodesToSelect, } - nodes, err = nodeSelectionCache.GetNodes(ctx, req) + nodes, err = uploadSelectionCache.GetNodes(ctx, req) require.NoError(t, err) require.Len(t, nodes, numNodesToSelect) diff --git a/satellite/overlay/nodeselectioncache.go b/satellite/overlay/uploadselection.go similarity index 78% rename from satellite/overlay/nodeselectioncache.go rename to satellite/overlay/uploadselection.go index 15deead53..c74c70841 100644 --- a/satellite/overlay/nodeselectioncache.go +++ b/satellite/overlay/uploadselection.go @@ -15,26 +15,26 @@ import ( "storj.io/storj/satellite/nodeselection" ) -// CacheDB implements the database 
for overlay node selection cache. +// UploadSelectionDB implements the database for upload selection cache. // // architecture: Database -type CacheDB interface { +type UploadSelectionDB interface { // SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error) } -// CacheConfig is a configuration for overlay node selection cache. -type CacheConfig struct { +// UploadSelectionCacheConfig is a configuration for upload selection cache. +type UploadSelectionCacheConfig struct { Disabled bool `help:"disable node cache" default:"false"` Staleness time.Duration `help:"how stale the node selection cache can be" releaseDefault:"3m" devDefault:"5m"` } -// NodeSelectionCache keeps a list of all the storage nodes that are qualified to store data +// UploadSelectionCache keeps a list of all the storage nodes that are qualified to store data // We organize the nodes by if they are reputable or a new node on the network. // The cache will sync with the nodes table in the database and get refreshed once the staleness time has past. -type NodeSelectionCache struct { +type UploadSelectionCache struct { log *zap.Logger - db CacheDB + db UploadSelectionDB selectionConfig NodeSelectionConfig staleness time.Duration @@ -43,9 +43,9 @@ type NodeSelectionCache struct { state *nodeselection.State } -// NewNodeSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data. -func NewNodeSelectionCache(log *zap.Logger, db CacheDB, staleness time.Duration, config NodeSelectionConfig) *NodeSelectionCache { - return &NodeSelectionCache{ +// NewUploadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data. 
+func NewUploadSelectionCache(log *zap.Logger, db UploadSelectionDB, staleness time.Duration, config NodeSelectionConfig) *UploadSelectionCache { + return &UploadSelectionCache{ log: log, db: db, staleness: staleness, @@ -55,7 +55,7 @@ func NewNodeSelectionCache(log *zap.Logger, db CacheDB, staleness time.Duration, // Refresh populates the cache with all of the reputableNodes and newNode nodes // This method is useful for tests. -func (cache *NodeSelectionCache) Refresh(ctx context.Context) (err error) { +func (cache *UploadSelectionCache) Refresh(ctx context.Context) (err error) { defer mon.Task()(&ctx)(&err) _, err = cache.refresh(ctx) return err @@ -64,7 +64,7 @@ func (cache *NodeSelectionCache) Refresh(ctx context.Context) (err error) { // refresh calls out to the database and refreshes the cache with the most up-to-date // data from the nodes table, then sets time that the last refresh occurred so we know when // to refresh again in the future. -func (cache *NodeSelectionCache) refresh(ctx context.Context) (state *nodeselection.State, err error) { +func (cache *UploadSelectionCache) refresh(ctx context.Context) (state *nodeselection.State, err error) { defer mon.Task()(&ctx)(&err) cache.mu.Lock() defer cache.mu.Unlock() @@ -89,7 +89,7 @@ func (cache *NodeSelectionCache) refresh(ctx context.Context) (state *nodeselect // GetNodes selects nodes from the cache that will be used to upload a file. // Every node selected will be from a distinct network. // If the cache hasn't been refreshed recently it will do so first. 
-func (cache *NodeSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) { +func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) { defer mon.Task()(&ctx)(&err) cache.mu.RLock() @@ -119,7 +119,7 @@ func (cache *NodeSelectionCache) GetNodes(ctx context.Context, req FindStorageNo } // GetNodeIPs gets the last node ip:port from the cache, refreshing when needed. -func (cache *NodeSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) { +func (cache *UploadSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) { defer mon.Task()(&ctx)(&err) cache.mu.RLock() @@ -139,7 +139,7 @@ func (cache *NodeSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.N } // Size returns how many reputable nodes and new nodes are in the cache. -func (cache *NodeSelectionCache) Size() (reputableNodeCount int, newNodeCount int) { +func (cache *UploadSelectionCache) Size() (reputableNodeCount int, newNodeCount int) { cache.mu.RLock() state := cache.state cache.mu.RUnlock() diff --git a/satellite/overlay/nodeselectioncache_test.go b/satellite/overlay/uploadselection_test.go similarity index 95% rename from satellite/overlay/nodeselectioncache_test.go rename to satellite/overlay/uploadselection_test.go index a392d8ae7..e285e883f 100644 --- a/satellite/overlay/nodeselectioncache_test.go +++ b/satellite/overlay/uploadselection_test.go @@ -48,7 +48,7 @@ const ( func TestRefresh(t *testing.T) { satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) { - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), db.OverlayCache(), lowStaleness, nodeSelectionConfig, @@ -147,7 +147,7 @@ func TestRefreshConcurrent(t *testing.T) { // concurrent cache.Refresh with high staleness, where high 
staleness means the // cache should only be refreshed the first time we call cache.Refresh mockDB := mockdb{} - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), &mockDB, highStaleness, nodeSelectionConfig, @@ -168,7 +168,7 @@ func TestRefreshConcurrent(t *testing.T) { // concurrent cache.Refresh with low staleness, where low staleness // means that the cache will refresh *every time* cache.Refresh is called mockDB = mockdb{} - cache = overlay.NewNodeSelectionCache(zap.NewNop(), + cache = overlay.NewUploadSelectionCache(zap.NewNop(), &mockDB, lowStaleness, nodeSelectionConfig, @@ -194,7 +194,7 @@ func TestGetNodes(t *testing.T) { DistinctIP: true, MinimumDiskSpace: 100 * memory.MiB, } - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), db.OverlayCache(), lowStaleness, nodeSelectionConfig, @@ -249,7 +249,7 @@ func TestGetNodesConcurrent(t *testing.T) { reputable: reputableNodes, new: newNodes, } - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), &mockDB, highStaleness, nodeSelectionConfig, @@ -289,7 +289,7 @@ func TestGetNodesConcurrent(t *testing.T) { reputable: reputableNodes, new: newNodes, } - cache = overlay.NewNodeSelectionCache(zap.NewNop(), + cache = overlay.NewUploadSelectionCache(zap.NewNop(), &mockDB, lowStaleness, nodeSelectionConfig, @@ -376,7 +376,7 @@ func TestGetNodesDistinct(t *testing.T) { config := nodeSelectionConfig config.NewNodeFraction = 0.5 config.DistinctIP = true - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), &mockDB, highStaleness, config, @@ -404,7 +404,7 @@ func TestGetNodesDistinct(t *testing.T) { config := nodeSelectionConfig config.NewNodeFraction = 0.5 config.DistinctIP = false - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), 
&mockDB, highStaleness, config, @@ -422,7 +422,7 @@ func TestGetNodesError(t *testing.T) { defer ctx.Cleanup() mockDB := mockdb{} - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), &mockDB, highStaleness, nodeSelectionConfig, @@ -450,7 +450,7 @@ func TestNewNodeFraction(t *testing.T) { DistinctIP: true, MinimumDiskSpace: 10 * memory.MiB, } - cache := overlay.NewNodeSelectionCache(zap.NewNop(), + cache := overlay.NewUploadSelectionCache(zap.NewNop(), db.OverlayCache(), lowStaleness, nodeSelectionConfig, From fff10b041ce696f38d1baeee17d43fccfd3ee9b8 Mon Sep 17 00:00:00 2001 From: Brandon Iglesias Date: Thu, 28 Jan 2021 10:16:57 -0500 Subject: [PATCH 30/38] quick update to readme (#4033) Co-authored-by: Stefan Benten --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index ef2cdc1e8..00968a4d3 100644 --- a/README.md +++ b/README.md @@ -33,14 +33,13 @@ All of our code for Storj v3 is open source. Have a code change you think would Have comments or bug reports? Want to propose a PR before hand-crafting it? Jump on to our [forum](https://forum.storj.io) and join the [Engineering Discussions](https://forum.storj.io/c/engineer-amas) to say hi to the developer community and to talk to the Storj core team. -Want to vote on or suggest new features? Post it on [ideas.storj.io](https://ideas.storj.io). +Want to vote on or suggest new features? Post it on the [forum](https://forum.storj.io/c/parent-cat/5). ### Issue tracking and roadmap See the breakdown of what we're building by checking out the following resources: * [White paper](https://storj.io/whitepaper) - * [Aha! 
Roadmap](https://storjlabs.aha.io/published/bc0db77dc0580bb10c0faf2b383d0529?page=1) ### Install required packages From 52d6852e58502ced2ef5820f5365548953b1125b Mon Sep 17 00:00:00 2001 From: Yingrong Zhao Date: Thu, 28 Jan 2021 14:43:47 -0500 Subject: [PATCH 31/38] pkg/server: add retry logic for random port assignment Change-Id: I70464e344a79dce8eadb9513d2a990faf3b2cca8 --- pkg/server/server.go | 55 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/pkg/server/server.go b/pkg/server/server.go index cad39437a..ad6155bac 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -6,8 +6,12 @@ package server import ( "context" "crypto/tls" + "errors" "net" + "os" + "runtime" "sync" + "syscall" quicgo "github.com/lucas-clemente/quic-go" "github.com/zeebo/errs" @@ -74,14 +78,30 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s Manager: rpc.NewDefaultManagerOptions(), } - publicTCPListener, err := net.Listen("tcp", publicAddr) - if err != nil { - return nil, err - } + var err error + var publicTCPListener, publicQUICListener net.Listener + for retry := 0; ; retry++ { + publicTCPListener, err = net.Listen("tcp", publicAddr) + if err != nil { + return nil, err + } - publicQUICListener, err := quic.NewListener(tlsOptions.ServerTLSConfig(), publicTCPListener.Addr().String(), &quicgo.Config{MaxIdleTimeout: defaultUserTimeout}) - if err != nil { - return nil, errs.Combine(err, publicTCPListener.Close()) + publicQUICListener, err = quic.NewListener(tlsOptions.ServerTLSConfig(), publicTCPListener.Addr().String(), &quicgo.Config{MaxIdleTimeout: defaultUserTimeout}) + if err != nil { + _, port, _ := net.SplitHostPort(publicAddr) + if port == "0" && retry < 10 && isErrorAddressAlreadyInUse(err) { + // from here, we know for sure that the tcp port chosen by the + // os is available, but we don't know if the same port number + // for udp is also available. 
+ // if a udp port is already in use, we will close the tcp port and retry + // to find one that is available for both udp and tcp. + _ = publicTCPListener.Close() + continue + } + return nil, errs.Combine(err, publicTCPListener.Close()) + } + + break } publicMux := drpcmux.New() @@ -223,3 +243,24 @@ func (p *Server) Run(ctx context.Context) (err error) { muxCancel() return errs.Combine(err, muxGroup.Wait()) } + +// isErrorAddressAlreadyInUse checks whether the error is corresponding to +// EADDRINUSE. Taken from https://stackoverflow.com/a/65865898. +func isErrorAddressAlreadyInUse(err error) bool { + var eOsSyscall *os.SyscallError + if !errors.As(err, &eOsSyscall) { + return false + } + var errErrno syscall.Errno + if !errors.As(eOsSyscall.Err, &errErrno) { + return false + } + if errErrno == syscall.EADDRINUSE { + return true + } + const WSAEADDRINUSE = 10048 + if runtime.GOOS == "windows" && errErrno == WSAEADDRINUSE { + return true + } + return false +} From 89e682b4d73dc2b1c4623e174095d3d441ceb1b6 Mon Sep 17 00:00:00 2001 From: Cameron Ayer Date: Thu, 28 Jan 2021 16:55:16 -0500 Subject: [PATCH 32/38] satellite/repair/checker: add 29/80/130-52 to default repair overrides Change-Id: I2e5a7538fdf33f3869fcb65fc88f7abb10faad79 --- satellite/repair/checker/config.go | 2 +- scripts/testdata/satellite-config.yaml.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/satellite/repair/checker/config.go b/satellite/repair/checker/config.go index a800dfe22..b572436df 100644 --- a/satellite/repair/checker/config.go +++ b/satellite/repair/checker/config.go @@ -19,7 +19,7 @@ type Config struct { IrreparableInterval time.Duration `help:"how frequently irrepairable checker should check for lost pieces" releaseDefault:"30m" devDefault:"0h0m5s"` ReliabilityCacheStaleness time.Duration `help:"how stale reliable node cache can be" releaseDefault:"5m" devDefault:"5m"` - RepairOverrides RepairOverrides `help:"comma-separated override values for repair threshold 
in the format k/o/n-override (min/optimal/total-override)" releaseDefault:"29/80/110-52,29/80/95-52" devDefault:""` + RepairOverrides RepairOverrides `help:"comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override)" releaseDefault:"29/80/110-52,29/80/95-52,29/80/130-52" devDefault:""` // Node failure rate is an estimation based on a 6 hour checker run interval (4 checker iterations per day), a network of about 9200 nodes, and about 2 nodes churning per day. // This results in `2/9200/4 = 0.00005435` being the probability of any single node going down in the interval of one checker iteration. NodeFailureRate float64 `help:"the probability of a single node going down within the next checker iteration" default:"0.00005435"` diff --git a/scripts/testdata/satellite-config.yaml.lock b/scripts/testdata/satellite-config.yaml.lock index d3ba3d169..87685d4bd 100755 --- a/scripts/testdata/satellite-config.yaml.lock +++ b/scripts/testdata/satellite-config.yaml.lock @@ -38,7 +38,7 @@ # checker.reliability-cache-staleness: 5m0s # comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override) -# checker.repair-overrides: 29/80/110-52,29/80/95-52 +# checker.repair-overrides: 29/80/110-52,29/80/95-52,29/80/130-52 # percent of held amount disposed to node after leaving withheld compensation.dispose-percent: 50 From 54e01d37f98c521d3a316a7984a2f27a3d55722d Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Thu, 28 Jan 2021 16:33:53 +0200 Subject: [PATCH 33/38] satellite/overlay: add DownloadSelectionCache Change-Id: Ic0779280172325f8d03f55a2e9673722f72bdd44 --- satellite/overlay/downloadselection.go | 148 ++++++++++++++++++++ satellite/overlay/downloadselection_test.go | 90 ++++++++++++ satellite/overlay/service.go | 19 ++- satellite/overlay/uploadselection_test.go | 6 +- satellite/satellitedb/overlaycache.go | 56 ++++++++ 5 files changed, 312 insertions(+), 7 deletions(-) create mode 
100644 satellite/overlay/downloadselection.go create mode 100644 satellite/overlay/downloadselection_test.go diff --git a/satellite/overlay/downloadselection.go b/satellite/overlay/downloadselection.go new file mode 100644 index 000000000..af5c56101 --- /dev/null +++ b/satellite/overlay/downloadselection.go @@ -0,0 +1,148 @@ +// Copyright (C) 2019 Storj Labs, Incache. +// See LICENSE for copying information. + +package overlay + +import ( + "context" + "sync" + "time" + + "go.uber.org/zap" + + "storj.io/common/storj" +) + +// DownloadSelectionDB implements the database for download selection cache. +// +// architecture: Database +type DownloadSelectionDB interface { + // SelectAllStorageNodesDownload returns nodes that are ready for downloading + SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*SelectedNode, error) +} + +// DownloadSelectionCacheConfig contains configuration for the selection cache. +type DownloadSelectionCacheConfig struct { + Staleness time.Duration + OnlineWindow time.Duration + AsOfSystemTime AsOfSystemTimeConfig +} + +// DownloadSelectionCache keeps a list of all the storage nodes that are qualified to download data from. +// The cache will sync with the nodes table in the database and get refreshed once the staleness time has past. +type DownloadSelectionCache struct { + log *zap.Logger + db DownloadSelectionDB + config DownloadSelectionCacheConfig + + mu sync.RWMutex + lastRefresh time.Time + state *DownloadSelectionCacheState +} + +// NewDownloadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to download data from. 
+func NewDownloadSelectionCache(log *zap.Logger, db DownloadSelectionDB, config DownloadSelectionCacheConfig) *DownloadSelectionCache { + return &DownloadSelectionCache{ + log: log, + db: db, + config: config, + } +} + +// Refresh populates the cache with all of the reputableNodes and newNode nodes +// This method is useful for tests. +func (cache *DownloadSelectionCache) Refresh(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + _, err = cache.refresh(ctx) + return err +} + +// refresh calls out to the database and refreshes the cache with the most up-to-date +// data from the nodes table, then sets time that the last refresh occurred so we know when +// to refresh again in the future. +func (cache *DownloadSelectionCache) refresh(ctx context.Context) (state *DownloadSelectionCacheState, err error) { + defer mon.Task()(&ctx)(&err) + cache.mu.Lock() + defer cache.mu.Unlock() + + if cache.state != nil && time.Since(cache.lastRefresh) <= cache.config.Staleness { + return cache.state, nil + } + + onlineNodes, err := cache.db.SelectAllStorageNodesDownload(ctx, cache.config.OnlineWindow, cache.config.AsOfSystemTime) + if err != nil { + return cache.state, err + } + + cache.lastRefresh = time.Now().UTC() + cache.state = NewDownloadSelectionCacheState(onlineNodes) + + mon.IntVal("refresh_cache_size_online").Observe(int64(len(onlineNodes))) + return cache.state, nil +} + +// GetNodeIPs gets the last node ip:port from the cache, refreshing when needed. 
+func (cache *DownloadSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) { + defer mon.Task()(&ctx)(&err) + + cache.mu.RLock() + lastRefresh := cache.lastRefresh + state := cache.state + cache.mu.RUnlock() + + // if the cache is stale, then refresh it before we get nodes + if state == nil || time.Since(lastRefresh) > cache.config.Staleness { + state, err = cache.refresh(ctx) + if err != nil { + return nil, err + } + } + + return state.IPs(nodes), nil +} + +// Size returns how many nodes are in the cache. +func (cache *DownloadSelectionCache) Size() int { + cache.mu.RLock() + state := cache.state + cache.mu.RUnlock() + + if state == nil { + return 0 + } + + return state.Size() +} + +// DownloadSelectionCacheState contains state of download selection cache. +type DownloadSelectionCacheState struct { + // ipPortByID returns IP based on storj.NodeID + ipPortByID map[storj.NodeID]string +} + +// NewDownloadSelectionCacheState creates a new state from the nodes. +func NewDownloadSelectionCacheState(nodes []*SelectedNode) *DownloadSelectionCacheState { + ipPortByID := map[storj.NodeID]string{} + for _, n := range nodes { + ipPortByID[n.ID] = n.LastIPPort + } + return &DownloadSelectionCacheState{ + ipPortByID: ipPortByID, + } +} + +// Size returns how many nodes are in the state. +func (state *DownloadSelectionCacheState) Size() int { + return len(state.ipPortByID) +} + +// IPs returns node ip:port for nodes that are in state. 
+func (state *DownloadSelectionCacheState) IPs(nodes []storj.NodeID) map[storj.NodeID]string { + xs := make(map[storj.NodeID]string, len(nodes)) + for _, nodeID := range nodes { + if ip, exists := state.ipPortByID[nodeID]; exists { + xs[nodeID] = ip + } + } + return xs +} diff --git a/satellite/overlay/downloadselection_test.go b/satellite/overlay/downloadselection_test.go new file mode 100644 index 000000000..88fff6351 --- /dev/null +++ b/satellite/overlay/downloadselection_test.go @@ -0,0 +1,90 @@ +// Copyright (C) 2021 Storj Labs, Incache. +// See LICENSE for copying information. + +package overlay_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "storj.io/common/pb" + "storj.io/common/storj" + "storj.io/common/testcontext" + "storj.io/common/testrand" + "storj.io/storj/satellite" + "storj.io/storj/satellite/overlay" + "storj.io/storj/satellite/satellitedb/satellitedbtest" +) + +var downloadSelectionCacheConfig = overlay.DownloadSelectionCacheConfig{ + Staleness: lowStaleness, + OnlineWindow: time.Hour, + AsOfSystemTime: overlay.AsOfSystemTimeConfig{Enabled: true, DefaultInterval: time.Minute}, +} + +func TestDownloadSelectionCacheState_Refresh(t *testing.T) { + satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) { + cache := overlay.NewDownloadSelectionCache(zap.NewNop(), + db.OverlayCache(), + downloadSelectionCacheConfig, + ) + + // the cache should have no nodes to start + err := cache.Refresh(ctx) + require.NoError(t, err) + require.Equal(t, 0, cache.Size()) + + // add some nodes to the database + const nodeCount = 2 + addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount, 0) + + // confirm nodes are in the cache once + err = cache.Refresh(ctx) + require.NoError(t, err) + require.Equal(t, nodeCount, cache.Size()) + }) +} + +func TestDownloadSelectionCacheState_GetNodeIPs(t *testing.T) { + satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db 
satellite.DB) { + cache := overlay.NewDownloadSelectionCache(zap.NewNop(), + db.OverlayCache(), + downloadSelectionCacheConfig, + ) + + // add some nodes to the database + const nodeCount = 2 + ids := addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount, 0) + + // confirm nodes are in the cache once + nodeips, err := cache.GetNodeIPs(ctx, ids) + require.NoError(t, err) + for _, id := range ids { + require.NotEmpty(t, nodeips[id]) + } + }) +} + +func TestDownloadSelectionCacheState_IPs(t *testing.T) { + ctx := testcontext.New(t) + defer ctx.Cleanup() + + node := &overlay.SelectedNode{ + ID: testrand.NodeID(), + Address: &pb.NodeAddress{ + Address: "1.0.1.1:8080", + }, + LastNet: "1.0.1", + LastIPPort: "1.0.1.1:8080", + } + + state := overlay.NewDownloadSelectionCacheState([]*overlay.SelectedNode{node}) + require.Equal(t, state.Size(), 1) + + ips := state.IPs([]storj.NodeID{testrand.NodeID(), node.ID}) + require.Len(t, ips, 1) + require.Equal(t, node.LastIPPort, ips[node.ID]) +} diff --git a/satellite/overlay/service.go b/satellite/overlay/service.go index 3821b6d13..4758b770a 100644 --- a/satellite/overlay/service.go +++ b/satellite/overlay/service.go @@ -45,6 +45,8 @@ type DB interface { SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*SelectedNode, error) // SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error) + // SelectAllStorageNodesDownload returns a nodes that are ready for downloading + SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*SelectedNode, error) // Get looks up the node by nodeID Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error) @@ -262,10 +264,12 @@ func (node *SelectedNode) Clone() *SelectedNode { // // architecture: 
Service type Service struct { - log *zap.Logger - db DB - config Config - UploadSelectionCache *UploadSelectionCache + log *zap.Logger + db DB + config Config + + UploadSelectionCache *UploadSelectionCache + DownloadSelectionCache *DownloadSelectionCache } // NewService returns a new Service. @@ -278,9 +282,16 @@ func NewService(log *zap.Logger, db DB, config Config) (*Service, error) { log: log, db: db, config: config, + UploadSelectionCache: NewUploadSelectionCache(log, db, config.NodeSelectionCache.Staleness, config.Node, ), + + DownloadSelectionCache: NewDownloadSelectionCache(log, db, DownloadSelectionCacheConfig{ + Staleness: config.NodeSelectionCache.Staleness, + OnlineWindow: config.Node.OnlineWindow, + AsOfSystemTime: config.Node.AsOfSystemTime, + }), }, nil } diff --git a/satellite/overlay/uploadselection_test.go b/satellite/overlay/uploadselection_test.go index e285e883f..4509e2712 100644 --- a/satellite/overlay/uploadselection_test.go +++ b/satellite/overlay/uploadselection_test.go @@ -73,7 +73,7 @@ func TestRefresh(t *testing.T) { }) } -func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, count, makeReputable int) (reputableIds []storj.NodeID) { +func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, count, makeReputable int) (ids []storj.NodeID) { for i := 0; i < count; i++ { subnet := strconv.Itoa(i) + ".1.2" addr := subnet + ".3:8080" @@ -109,10 +109,10 @@ func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, coun }, time.Now()) require.NoError(t, err) require.NotNil(t, stats.VettedAt) - reputableIds = append(reputableIds, storj.NodeID{byte(i)}) + ids = append(ids, storj.NodeID{byte(i)}) } } - return reputableIds + return ids } type mockdb struct { diff --git a/satellite/satellitedb/overlaycache.go b/satellite/satellitedb/overlaycache.go index 8bf003548..a4f7206c0 100644 --- a/satellite/satellitedb/overlaycache.go +++ b/satellite/satellitedb/overlaycache.go @@ -118,6 +118,62 @@ func 
(cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, sele return reputableNodes, newNodes, Error.Wrap(rows.Err()) } +// SelectAllStorageNodesDownload returns all nodes that qualify to store data, organized as reputable nodes and new nodes. +func (cache *overlaycache) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (nodes []*overlay.SelectedNode, err error) { + for { + nodes, err = cache.selectAllStorageNodesDownload(ctx, onlineWindow, asOf) + if err != nil { + if cockroachutil.NeedsRetry(err) { + continue + } + return nodes, err + } + break + } + + return nodes, err +} + +func (cache *overlaycache) selectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOfConfig overlay.AsOfSystemTimeConfig) (_ []*overlay.SelectedNode, err error) { + defer mon.Task()(&ctx)(&err) + + asOf := cache.db.AsOfSystemTimeClause(asOfConfig.DefaultInterval) + + query := ` + SELECT id, address, last_net, last_ip_port + FROM nodes ` + asOf + ` + WHERE disqualified IS NULL + AND exit_finished_at IS NULL + AND last_contact_success > $1 + ` + args := []interface{}{ + // $1 + time.Now().Add(-onlineWindow), + } + + rows, err := cache.db.Query(ctx, query, args...) + if err != nil { + return nil, err + } + defer func() { err = errs.Combine(err, rows.Close()) }() + + var nodes []*overlay.SelectedNode + for rows.Next() { + var node overlay.SelectedNode + node.Address = &pb.NodeAddress{} + var lastIPPort sql.NullString + err = rows.Scan(&node.ID, &node.Address.Address, &node.LastNet, &lastIPPort) + if err != nil { + return nil, err + } + if lastIPPort.Valid { + node.LastIPPort = lastIPPort.String + } + nodes = append(nodes, &node) + } + return nodes, Error.Wrap(rows.Err()) +} + // GetNodesNetwork returns the /24 subnet for each storage node, order is not guaranteed. 
func (cache *overlaycache) GetNodesNetwork(ctx context.Context, nodeIDs []storj.NodeID) (nodeNets []string, err error) { for { From b7a0739219e6ee25026fc6ca9e3224967cea5f98 Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Thu, 28 Jan 2021 17:02:34 +0200 Subject: [PATCH 34/38] satellite/overlay: use DownloadSelectionCache for getting node IPs Change-Id: Ib8f4eedb2bf465767050693a1e961b37a294ca06 --- satellite/nodeselection/state.go | 17 ----------------- satellite/nodeselection/state_test.go | 24 ------------------------ satellite/overlay/service.go | 2 +- satellite/overlay/uploadselection.go | 20 -------------------- 4 files changed, 1 insertion(+), 62 deletions(-) diff --git a/satellite/nodeselection/state.go b/satellite/nodeselection/state.go index 6813d8485..0cdaa19a9 100644 --- a/satellite/nodeselection/state.go +++ b/satellite/nodeselection/state.go @@ -22,8 +22,6 @@ type State struct { stats Stats // netByID returns subnet based on storj.NodeID netByID map[storj.NodeID]string - // ipPortByID returns IP based on storj.NodeID - ipPortByID map[storj.NodeID]string // nonDistinct contains selectors for non-distinct selection. nonDistinct struct { Reputable SelectByID @@ -59,14 +57,11 @@ func NewState(reputableNodes, newNodes []*Node) *State { state := &State{} state.netByID = map[storj.NodeID]string{} - state.ipPortByID = map[storj.NodeID]string{} for _, node := range reputableNodes { state.netByID[node.ID] = node.LastNet - state.ipPortByID[node.ID] = node.LastIPPort } for _, node := range newNodes { state.netByID[node.ID] = node.LastNet - state.ipPortByID[node.ID] = node.LastIPPort } state.nonDistinct.Reputable = SelectByID(reputableNodes) @@ -140,18 +135,6 @@ func (state *State) Select(ctx context.Context, request Request) (_ []*Node, err return selected, nil } -// IPs returns node ip:port for nodes that are in state. 
-func (state *State) IPs(ctx context.Context, nodes []storj.NodeID) map[storj.NodeID]string { - defer mon.Task()(&ctx)(nil) - xs := make(map[storj.NodeID]string, len(nodes)) - for _, nodeID := range nodes { - if ip, exists := state.ipPortByID[nodeID]; exists { - xs[nodeID] = ip - } - } - return xs -} - // Stats returns state information. func (state *State) Stats() Stats { state.mu.RLock() diff --git a/satellite/nodeselection/state_test.go b/satellite/nodeselection/state_test.go index 1acf2aeee..5e861939b 100644 --- a/satellite/nodeselection/state_test.go +++ b/satellite/nodeselection/state_test.go @@ -203,27 +203,3 @@ next: return xs } - -func TestState_IPs(t *testing.T) { - ctx := testcontext.New(t) - defer ctx.Cleanup() - - reputableNodes := createRandomNodes(2, "1.0.1") - newNodes := createRandomNodes(2, "1.0.3") - - state := nodeselection.NewState(reputableNodes, newNodes) - - nodeIPs := state.IPs(ctx, nil) - require.Equal(t, map[storj.NodeID]string{}, nodeIPs) - - missing := storj.NodeID{} - nodeIPs = state.IPs(ctx, []storj.NodeID{ - reputableNodes[0].ID, - newNodes[1].ID, - missing, - }) - require.Equal(t, map[storj.NodeID]string{ - reputableNodes[0].ID: "1.0.1.0:8080", - newNodes[1].ID: "1.0.3.1:8080", - }, nodeIPs) -} diff --git a/satellite/overlay/service.go b/satellite/overlay/service.go index 4758b770a..24ffc879b 100644 --- a/satellite/overlay/service.go +++ b/satellite/overlay/service.go @@ -324,7 +324,7 @@ func (service *Service) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs // GetNodeIPs returns a map of node ip:port for the supplied nodeIDs. func (service *Service) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]string, err error) { defer mon.Task()(&ctx)(&err) - return service.UploadSelectionCache.GetNodeIPs(ctx, nodeIDs) + return service.DownloadSelectionCache.GetNodeIPs(ctx, nodeIDs) } // IsOnline checks if a node is 'online' based on the collected statistics. 
diff --git a/satellite/overlay/uploadselection.go b/satellite/overlay/uploadselection.go index c74c70841..d6a8a7035 100644 --- a/satellite/overlay/uploadselection.go +++ b/satellite/overlay/uploadselection.go @@ -118,26 +118,6 @@ func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorage return convNodesToSelectedNodes(selected), err } -// GetNodeIPs gets the last node ip:port from the cache, refreshing when needed. -func (cache *UploadSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) { - defer mon.Task()(&ctx)(&err) - - cache.mu.RLock() - lastRefresh := cache.lastRefresh - state := cache.state - cache.mu.RUnlock() - - // if the cache is stale, then refresh it before we get nodes - if state == nil || time.Since(lastRefresh) > cache.staleness { - state, err = cache.refresh(ctx) - if err != nil { - return nil, err - } - } - - return state.IPs(ctx, nodes), nil -} - // Size returns how many reputable nodes and new nodes are in the cache. func (cache *UploadSelectionCache) Size() (reputableNodeCount int, newNodeCount int) { cache.mu.RLock() From c5ecca1e1da0af2be569adf1db2acf9221b0e8fb Mon Sep 17 00:00:00 2001 From: Cameron Ayer Date: Fri, 18 Dec 2020 14:27:28 -0500 Subject: [PATCH 35/38] web/storagenode: remove uptime columns and references Full scope: storagenode/{console,nodestats,notifications,reputation,storagenodedb}, web/storagenode These columns are deprecated. They used to be for the uptime reputation system which has been replaced by downtime tracking with audits. 
Change-Id: I151d6569577d89733ac97af21a1d885323522b21 --- storagenode/nodestats/service.go | 5 -- storagenode/notifications/notifications.go | 6 +- storagenode/reputation/reputation.go | 1 - storagenode/reputation/reputation_test.go | 17 ------ storagenode/storagenodedb/database.go | 55 +++++++++++++++++++ storagenode/storagenodedb/reputation.go | 34 +----------- storagenode/storagenodedb/schema.go | 25 --------- .../storagenodedb/testdata/multidbsnapshot.go | 1 + storagenode/storagenodedb/testdata/v48.go | 50 +++++++++++++++++ .../src/app/components/ChecksArea.vue | 8 +-- .../src/app/components/SNOContentFilling.vue | 27 +++------ .../components/payments/HeldHistoryArea.vue | 8 --- web/storagenode/src/app/store/modules/node.ts | 4 +- .../src/app/types/notifications.ts | 3 - web/storagenode/src/app/types/sno.ts | 2 +- .../src/storagenode/api/storagenode.ts | 28 ++-------- .../notifications/notifications.ts | 5 +- web/storagenode/src/storagenode/sno/sno.ts | 8 +-- .../notifications/SNONotification.spec.ts | 2 +- .../SNONotification.spec.ts.snap | 5 +- .../payments/EstimationPeriodDropdown.spec.ts | 5 +- .../payments/HeldHistoryArea.spec.ts | 12 +--- .../components/payments/TotalHeldArea.spec.ts | 5 +- web/storagenode/tests/unit/store/node.spec.ts | 19 +++---- .../tests/unit/store/notifications.spec.ts | 4 +- 25 files changed, 151 insertions(+), 188 deletions(-) create mode 100644 storagenode/storagenodedb/testdata/v48.go diff --git a/storagenode/nodestats/service.go b/storagenode/nodestats/service.go index 5a53b3c0f..68fdcdf31 100644 --- a/storagenode/nodestats/service.go +++ b/storagenode/nodestats/service.go @@ -74,15 +74,10 @@ func (s *Service) GetReputationStats(ctx context.Context, satelliteID storj.Node return nil, NodeStatsServiceErr.Wrap(err) } - uptime := resp.GetUptimeCheck() audit := resp.GetAuditCheck() return &reputation.Stats{ SatelliteID: satelliteID, - Uptime: reputation.Metric{ - TotalCount: uptime.GetTotalCount(), - SuccessCount: 
uptime.GetSuccessCount(), - }, Audit: reputation.Metric{ TotalCount: audit.GetTotalCount(), SuccessCount: audit.GetSuccessCount(), diff --git a/storagenode/notifications/notifications.go b/storagenode/notifications/notifications.go index 2fe20682a..fe393e3a4 100644 --- a/storagenode/notifications/notifications.go +++ b/storagenode/notifications/notifications.go @@ -31,12 +31,10 @@ const ( TypeCustom Type = 0 // TypeAuditCheckFailure is a notification type which describes node's audit check failure. TypeAuditCheckFailure Type = 1 - // TypeUptimeCheckFailure is a notification type which describes node's uptime check failure. - TypeUptimeCheckFailure Type = 2 // TypeDisqualification is a notification type which describes node's disqualification status. - TypeDisqualification Type = 3 + TypeDisqualification Type = 2 // TypeSuspension is a notification type which describes node's suspension status. - TypeSuspension Type = 4 + TypeSuspension Type = 3 ) // NewNotification holds notification entity info which is being received from satellite or local client. 
diff --git a/storagenode/reputation/reputation.go b/storagenode/reputation/reputation.go index 067643e4e..bd1fe2183 100644 --- a/storagenode/reputation/reputation.go +++ b/storagenode/reputation/reputation.go @@ -27,7 +27,6 @@ type DB interface { type Stats struct { SatelliteID storj.NodeID - Uptime Metric Audit Metric OnlineScore float64 diff --git a/storagenode/reputation/reputation_test.go b/storagenode/reputation/reputation_test.go index 1bba18d71..12a2baf89 100644 --- a/storagenode/reputation/reputation_test.go +++ b/storagenode/reputation/reputation_test.go @@ -25,13 +25,6 @@ func TestReputationDBGetInsert(t *testing.T) { stats := reputation.Stats{ SatelliteID: testrand.NodeID(), - Uptime: reputation.Metric{ - TotalCount: 1, - SuccessCount: 2, - Alpha: 3, - Beta: 4, - Score: 5, - }, Audit: reputation.Metric{ TotalCount: 6, SuccessCount: 7, @@ -70,7 +63,6 @@ func TestReputationDBGetInsert(t *testing.T) { assert.Equal(t, res.OnlineScore, stats.OnlineScore) assert.Nil(t, res.AuditHistory) - compareReputationMetric(t, &res.Uptime, &stats.Uptime) compareReputationMetric(t, &res.Audit, &stats.Audit) }) }) @@ -87,13 +79,6 @@ func TestReputationDBGetAll(t *testing.T) { rep := reputation.Stats{ SatelliteID: testrand.NodeID(), - Uptime: reputation.Metric{ - TotalCount: int64(i + 1), - SuccessCount: int64(i + 2), - Alpha: float64(i + 3), - Beta: float64(i + 4), - Score: float64(i + 5), - }, Audit: reputation.Metric{ TotalCount: int64(i + 6), SuccessCount: int64(i + 7), @@ -137,7 +122,6 @@ func TestReputationDBGetAll(t *testing.T) { assert.Equal(t, rep.OnlineScore, stats[0].OnlineScore) assert.Nil(t, rep.AuditHistory) - compareReputationMetric(t, &rep.Uptime, &stats[0].Uptime) compareReputationMetric(t, &rep.Audit, &stats[0].Audit) } } @@ -160,7 +144,6 @@ func TestReputationDBGetInsertAuditHistory(t *testing.T) { stats := reputation.Stats{ SatelliteID: testrand.NodeID(), - Uptime: reputation.Metric{}, Audit: reputation.Metric{}, AuditHistory: &pb.AuditHistory{ Score: 
0.5, diff --git a/storagenode/storagenodedb/database.go b/storagenode/storagenodedb/database.go index 09bf9a3cf..19f9da9b3 100644 --- a/storagenode/storagenodedb/database.go +++ b/storagenode/storagenodedb/database.go @@ -1838,6 +1838,61 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration { `ALTER TABLE reputation ADD COLUMN audit_history BLOB`, }, }, + { + DB: &db.reputationDB.DB, + Description: "drop uptime columns", + Version: 48, + Action: migrate.Func(func(ctx context.Context, _ *zap.Logger, rdb tagsql.DB, rtx tagsql.Tx) (err error) { + _, err = rtx.Exec(ctx, ` + CREATE TABLE reputation_new ( + satellite_id BLOB NOT NULL, + audit_success_count INTEGER NOT NULL, + audit_total_count INTEGER NOT NULL, + audit_reputation_alpha REAL NOT NULL, + audit_reputation_beta REAL NOT NULL, + audit_reputation_score REAL NOT NULL, + audit_unknown_reputation_alpha REAL NOT NULL, + audit_unknown_reputation_beta REAL NOT NULL, + audit_unknown_reputation_score REAL NOT NULL, + online_score REAL NOT NULL, + audit_history BLOB, + disqualified_at TIMESTAMP, + updated_at TIMESTAMP NOT NULL, + suspended_at TIMESTAMP, + offline_suspended_at TIMESTAMP, + offline_under_review_at TIMESTAMP, + joined_at TIMESTAMP NOT NULL, + PRIMARY KEY (satellite_id) + ); + INSERT INTO reputation_new SELECT + satellite_id, + audit_success_count, + audit_total_count, + audit_reputation_alpha, + audit_reputation_beta, + audit_reputation_score, + audit_unknown_reputation_alpha, + audit_unknown_reputation_beta, + audit_unknown_reputation_score, + online_score, + audit_history, + disqualified_at, + updated_at, + suspended_at, + offline_suspended_at, + offline_under_review_at, + joined_at + FROM reputation; + DROP TABLE reputation; + ALTER TABLE reputation_new RENAME TO reputation; + `) + if err != nil { + return errs.Wrap(err) + } + + return nil + }), + }, }, } } diff --git a/storagenode/storagenodedb/reputation.go b/storagenode/storagenodedb/reputation.go index df0607673..6650e9c0a 100644 --- 
a/storagenode/storagenodedb/reputation.go +++ b/storagenode/storagenodedb/reputation.go @@ -32,11 +32,6 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err query := `INSERT OR REPLACE INTO reputation ( satellite_id, - uptime_success_count, - uptime_total_count, - uptime_reputation_alpha, - uptime_reputation_beta, - uptime_reputation_score, audit_success_count, audit_total_count, audit_reputation_alpha, @@ -53,7 +48,7 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err offline_under_review_at, updated_at, joined_at - ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` + ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` // ensure we insert utc if stats.DisqualifiedAt != nil { @@ -83,11 +78,6 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err _, err = db.ExecContext(ctx, query, stats.SatelliteID, - stats.Uptime.SuccessCount, - stats.Uptime.TotalCount, - stats.Uptime.Alpha, - stats.Uptime.Beta, - stats.Uptime.Score, stats.Audit.SuccessCount, stats.Audit.TotalCount, stats.Audit.Alpha, @@ -118,12 +108,7 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r } row := db.QueryRowContext(ctx, - `SELECT uptime_success_count, - uptime_total_count, - uptime_reputation_alpha, - uptime_reputation_beta, - uptime_reputation_score, - audit_success_count, + `SELECT audit_success_count, audit_total_count, audit_reputation_alpha, audit_reputation_beta, @@ -145,11 +130,6 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r var auditHistoryBytes []byte err = row.Scan( - &stats.Uptime.SuccessCount, - &stats.Uptime.TotalCount, - &stats.Uptime.Alpha, - &stats.Uptime.Beta, - &stats.Uptime.Score, &stats.Audit.SuccessCount, &stats.Audit.TotalCount, &stats.Audit.Alpha, @@ -188,11 +168,6 @@ func (db *reputationDB) All(ctx context.Context) (_ []reputation.Stats, err erro defer mon.Task()(&ctx)(&err) query := `SELECT satellite_id, - 
uptime_success_count, - uptime_total_count, - uptime_reputation_alpha, - uptime_reputation_beta, - uptime_reputation_score, audit_success_count, audit_total_count, audit_reputation_alpha, @@ -222,11 +197,6 @@ func (db *reputationDB) All(ctx context.Context) (_ []reputation.Stats, err erro var stats reputation.Stats err := rows.Scan(&stats.SatelliteID, - &stats.Uptime.SuccessCount, - &stats.Uptime.TotalCount, - &stats.Uptime.Alpha, - &stats.Uptime.Beta, - &stats.Uptime.Score, &stats.Audit.SuccessCount, &stats.Audit.TotalCount, &stats.Audit.Alpha, diff --git a/storagenode/storagenodedb/schema.go b/storagenode/storagenodedb/schema.go index d79494878..d0e199911 100644 --- a/storagenode/storagenodedb/schema.go +++ b/storagenode/storagenodedb/schema.go @@ -606,31 +606,6 @@ func Schema() map[string]*dbschema.Schema { Type: "TIMESTAMP", IsNullable: false, }, - &dbschema.Column{ - Name: "uptime_reputation_alpha", - Type: "REAL", - IsNullable: false, - }, - &dbschema.Column{ - Name: "uptime_reputation_beta", - Type: "REAL", - IsNullable: false, - }, - &dbschema.Column{ - Name: "uptime_reputation_score", - Type: "REAL", - IsNullable: false, - }, - &dbschema.Column{ - Name: "uptime_success_count", - Type: "INTEGER", - IsNullable: false, - }, - &dbschema.Column{ - Name: "uptime_total_count", - Type: "INTEGER", - IsNullable: false, - }, }, }, }, diff --git a/storagenode/storagenodedb/testdata/multidbsnapshot.go b/storagenode/storagenodedb/testdata/multidbsnapshot.go index 7022875dd..b176d114c 100644 --- a/storagenode/storagenodedb/testdata/multidbsnapshot.go +++ b/storagenode/storagenodedb/testdata/multidbsnapshot.go @@ -62,6 +62,7 @@ var States = MultiDBStates{ &v45, &v46, &v47, + &v48, }, } diff --git a/storagenode/storagenodedb/testdata/v48.go b/storagenode/storagenodedb/testdata/v48.go new file mode 100644 index 000000000..eed69b72d --- /dev/null +++ b/storagenode/storagenodedb/testdata/v48.go @@ -0,0 +1,50 @@ +// Copyright (C) 2020 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package testdata + +import "storj.io/storj/storagenode/storagenodedb" + +var v48 = MultiDBState{ + Version: 48, + DBStates: DBStates{ + storagenodedb.UsedSerialsDBName: v47.DBStates[storagenodedb.UsedSerialsDBName], + storagenodedb.StorageUsageDBName: v47.DBStates[storagenodedb.StorageUsageDBName], + storagenodedb.ReputationDBName: &DBState{ + SQL: ` + -- table to store nodestats cache + CREATE TABLE reputation ( + satellite_id BLOB NOT NULL, + audit_success_count INTEGER NOT NULL, + audit_total_count INTEGER NOT NULL, + audit_reputation_alpha REAL NOT NULL, + audit_reputation_beta REAL NOT NULL, + audit_reputation_score REAL NOT NULL, + audit_unknown_reputation_alpha REAL NOT NULL, + audit_unknown_reputation_beta REAL NOT NULL, + audit_unknown_reputation_score REAL NOT NULL, + online_score REAL NOT NULL, + audit_history BLOB, + disqualified_at TIMESTAMP, + updated_at TIMESTAMP NOT NULL, + suspended_at TIMESTAMP, + offline_suspended_at TIMESTAMP, + offline_under_review_at TIMESTAMP, + joined_at TIMESTAMP NOT NULL, + PRIMARY KEY (satellite_id) + ); + INSERT INTO reputation VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,1.0,1.0,1.0,1.0,1.0,1.0,1.0,NULL,'2019-07-19 20:00:00+00:00','2019-08-23 20:00:00+00:00',NULL,NULL,NULL,'1970-01-01 00:00:00+00:00'); + `, + }, + storagenodedb.PieceSpaceUsedDBName: v47.DBStates[storagenodedb.PieceSpaceUsedDBName], + storagenodedb.PieceInfoDBName: v47.DBStates[storagenodedb.PieceInfoDBName], + storagenodedb.PieceExpirationDBName: v47.DBStates[storagenodedb.PieceExpirationDBName], + storagenodedb.OrdersDBName: v47.DBStates[storagenodedb.OrdersDBName], + storagenodedb.BandwidthDBName: v47.DBStates[storagenodedb.BandwidthDBName], + storagenodedb.SatellitesDBName: v47.DBStates[storagenodedb.SatellitesDBName], + storagenodedb.DeprecatedInfoDBName: v47.DBStates[storagenodedb.DeprecatedInfoDBName], + storagenodedb.NotificationsDBName: 
v47.DBStates[storagenodedb.NotificationsDBName], + storagenodedb.HeldAmountDBName: v47.DBStates[storagenodedb.HeldAmountDBName], + storagenodedb.PricingDBName: v47.DBStates[storagenodedb.PricingDBName], + storagenodedb.APIKeysDBName: v47.DBStates[storagenodedb.APIKeysDBName]}, +} diff --git a/web/storagenode/src/app/components/ChecksArea.vue b/web/storagenode/src/app/components/ChecksArea.vue index ad5bcd661..4bf520e17 100644 --- a/web/storagenode/src/app/components/ChecksArea.vue +++ b/web/storagenode/src/app/components/ChecksArea.vue @@ -15,7 +15,7 @@
-

{{ value }}%

+

{{ amount }}

@@ -36,7 +36,7 @@ export default class ChecksArea extends Vue { @Prop({default: ''}) private readonly label: string; @Prop({default: ''}) - private readonly amount: number; + private readonly amount: string; @Prop({default: ''}) private readonly infoText: string; @@ -51,10 +51,6 @@ export default class ChecksArea extends Vue { public toggleTooltipVisibility(): void { this.isTooltipVisible = !this.isTooltipVisible; } - - public get value(): string { - return this.amount.toFixed(2); - } } diff --git a/web/storagenode/src/app/components/SNOContentFilling.vue b/web/storagenode/src/app/components/SNOContentFilling.vue index 2802ea33f..3d57d7111 100644 --- a/web/storagenode/src/app/components/SNOContentFilling.vue +++ b/web/storagenode/src/app/components/SNOContentFilling.vue @@ -126,12 +126,12 @@
@@ -175,20 +175,7 @@ import LargeSuspensionIcon from '@/../static/images/largeSuspend.svg'; import { RouteConfig } from '@/app/router'; import { APPSTATE_ACTIONS } from '@/app/store/modules/appState'; import { formatBytes } from '@/app/utils/converter'; -import { SatelliteInfo } from '@/storagenode/sno/sno'; - -/** - * Checks class holds info for Checks entity. - */ -class Checks { - public uptime: number; - public audit: number; - - public constructor(uptime: number, audit: number) { - this.uptime = uptime; - this.audit = audit; - } -} +import { SatelliteInfo, SatelliteScores } from '@/storagenode/sno/sno'; @Component ({ components: { @@ -343,11 +330,11 @@ export default class SNOContentFilling extends Vue { } /** - * checks - uptime and audit checks statuses from store. - * @return Checks - uptime and audit checks statuses + * checks - audit checks status from store. + * @return Checks - audit checks statuses */ - public get checks(): Checks { - return this.$store.state.node.checks; + public get audits(): SatelliteScores { + return this.$store.state.node.audits; } /** diff --git a/web/storagenode/src/app/components/payments/HeldHistoryArea.vue b/web/storagenode/src/app/components/payments/HeldHistoryArea.vue index 026ae9c01..84c3c41f7 100644 --- a/web/storagenode/src/app/components/payments/HeldHistoryArea.vue +++ b/web/storagenode/src/app/components/payments/HeldHistoryArea.vue @@ -54,14 +54,6 @@ export default class HeldHistoryArea extends Vue { */ public isAllStatsShown: boolean = true; - /** - * Lifecycle hook before component render. - * Fetches held history information. - */ - public beforeMount(): void { - this.$store.dispatch(PAYOUT_ACTIONS.GET_HELD_HISTORY); - } - /** * Sets held history table state to All Stats. 
*/ diff --git a/web/storagenode/src/app/store/modules/node.ts b/web/storagenode/src/app/store/modules/node.ts index 453893dda..efd1955f5 100644 --- a/web/storagenode/src/app/store/modules/node.ts +++ b/web/storagenode/src/app/store/modules/node.ts @@ -7,13 +7,11 @@ import { Duration, millisecondsInSecond, secondsInMinute } from '@/app/utils/dur import { getMonthsBeforeNow } from '@/app/utils/payout'; import { StorageNodeService } from '@/storagenode/sno/service'; import { - Checks, Dashboard, Node, Satellite, SatelliteInfo, Satellites, - Traffic, Utilization, } from '@/storagenode/sno/sno'; @@ -84,7 +82,7 @@ export function newNodeModule(service: StorageNodeService): StoreModule { const testNotification = new Notification( '123', '1234', - NotificationTypes.UptimeCheckFailure, + NotificationTypes.AuditCheckFailure, 'title1', 'message1', ); diff --git a/web/storagenode/tests/unit/components/notifications/__snapshots__/SNONotification.spec.ts.snap b/web/storagenode/tests/unit/components/notifications/__snapshots__/SNONotification.spec.ts.snap index 653d3ace6..a124ea8b7 100644 --- a/web/storagenode/tests/unit/components/notifications/__snapshots__/SNONotification.spec.ts.snap +++ b/web/storagenode/tests/unit/components/notifications/__snapshots__/SNONotification.spec.ts.snap @@ -6,8 +6,9 @@ exports[`SNONotification renders correctly 1`] = `
- -
+ + +

title1: message1 diff --git a/web/storagenode/tests/unit/components/payments/EstimationPeriodDropdown.spec.ts b/web/storagenode/tests/unit/components/payments/EstimationPeriodDropdown.spec.ts index 69a5f51a2..79b638308 100644 --- a/web/storagenode/tests/unit/components/payments/EstimationPeriodDropdown.spec.ts +++ b/web/storagenode/tests/unit/components/payments/EstimationPeriodDropdown.spec.ts @@ -14,7 +14,7 @@ import { Dashboard, Metric, Satellite, - SatelliteInfo, + SatelliteInfo, SatelliteScores, Stamp, Traffic, } from '@/storagenode/sno/sno'; @@ -55,8 +55,7 @@ describe('EstimationPeriodDropdown', (): void => { 222, 50, 70, - new Metric(1, 1, 1, 0, 1, 0, 0, 1), - new Metric(2, 1, 1, 0, 1, 0, 0, 1), + new SatelliteScores('', 1, 0, 0), new Date(), ); diff --git a/web/storagenode/tests/unit/components/payments/HeldHistoryArea.spec.ts b/web/storagenode/tests/unit/components/payments/HeldHistoryArea.spec.ts index 7eefcd6a7..e9d20b1cd 100644 --- a/web/storagenode/tests/unit/components/payments/HeldHistoryArea.spec.ts +++ b/web/storagenode/tests/unit/components/payments/HeldHistoryArea.spec.ts @@ -20,14 +20,12 @@ const payoutModule = newPayoutModule(payoutService); const store = new Vuex.Store({ modules: { payoutModule }}); describe('HeldHistoryArea', (): void => { - it('renders correctly', async (): Promise => { + it('renders correctly', (): void => { const wrapper = shallowMount(HeldHistoryArea, { store, localVue, }); - await localVue.nextTick(); - expect(wrapper).toMatchSnapshot(); }); @@ -37,15 +35,11 @@ describe('HeldHistoryArea', (): void => { localVue, }); - wrapper.findAll('.held-history-container__header__selection-area__item').at(1).trigger('click'); - - await localVue.nextTick(); + await wrapper.findAll('.held-history-container__header__selection-area__item').at(1).trigger('click'); expect(wrapper).toMatchSnapshot(); - wrapper.findAll('.held-history-container__header__selection-area__item').at(0).trigger('click'); - - await localVue.nextTick(); + await 
wrapper.findAll('.held-history-container__header__selection-area__item').at(0).trigger('click'); expect(wrapper).toMatchSnapshot(); }); diff --git a/web/storagenode/tests/unit/components/payments/TotalHeldArea.spec.ts b/web/storagenode/tests/unit/components/payments/TotalHeldArea.spec.ts index 75b51a716..1c984b576 100644 --- a/web/storagenode/tests/unit/components/payments/TotalHeldArea.spec.ts +++ b/web/storagenode/tests/unit/components/payments/TotalHeldArea.spec.ts @@ -12,7 +12,7 @@ import { StorageNodeApi } from '@/storagenode/api/storagenode'; import { Paystub, TotalHeldAndPaid } from '@/storagenode/payouts/payouts'; import { PayoutService } from '@/storagenode/payouts/service'; import { StorageNodeService } from '@/storagenode/sno/service'; -import { Metric, Satellite, Stamp } from '@/storagenode/sno/sno'; +import { Metric, Satellite, SatelliteScores, Stamp } from '@/storagenode/sno/sno'; import { createLocalVue, shallowMount } from '@vue/test-utils'; const localVue = createLocalVue(); @@ -59,8 +59,7 @@ describe('TotalHeldArea', (): void => { 222, 50, 70, - new Metric(1, 1, 1, 0, 1), - new Metric(2, 1, 1, 0, 1), + new SatelliteScores('', 1, 0, 0), testJoinAt, ); const paystub = new Paystub(); diff --git a/web/storagenode/tests/unit/store/node.spec.ts b/web/storagenode/tests/unit/store/node.spec.ts index 20b62b9c2..c2e201bd4 100644 --- a/web/storagenode/tests/unit/store/node.spec.ts +++ b/web/storagenode/tests/unit/store/node.spec.ts @@ -13,7 +13,6 @@ import { EgressUsed, Ingress, IngressUsed, - Metric, Satellite, SatelliteInfo, Satellites, @@ -79,17 +78,15 @@ describe('mutations', () => { 222, 50, 70, - new Metric(1, 1, 1, 0, 1, 0, 0, 1), - new Metric(2, 1, 1, 0, 1), + new SatelliteScores('', 1, 0, 0), new Date(2019, 3, 1), ); store.commit(NODE_MUTATIONS.SELECT_SATELLITE, satelliteInfo); expect(state.node.selectedSatellite.id).toBe(satelliteInfo.id); - expect(state.node.checks.audit).toBe(0); - expect(state.node.checks.uptime).toBe(50); - 
expect(state.node.checks.suspension).toBe(100); + expect(state.node.audits.auditScore.label).toBe('100 %'); + expect(state.node.audits.suspensionScore.label).toBe('0 %'); }); it('don`t selects wrong satellite', () => { @@ -143,8 +140,7 @@ describe('mutations', () => { 222, 50, 70, - new Metric(1, 1, 1, 0, 1), - new Metric(2, 1, 1, 0, 1), + new SatelliteScores('', 100, 200, 300), new Date(2019, 3, 1), ); @@ -244,8 +240,7 @@ describe('actions', () => { 2221, 501, 701, - new Metric(1, 1, 1, 0, 1), - new Metric(2, 1, 1, 0, 1), + new SatelliteScores('', 100, 200, 0.2), new Date(2019, 3, 1), ), ), @@ -262,6 +257,7 @@ describe('actions', () => { expect(state.node.egressSummary).toBe(501); expect(state.node.ingressSummary).toBe(701); expect(state.node.storageSummary).toBe(1111); + expect(state.node.audits.onlineScore.label).toBe('20 %'); }); it('fetch all satellites info throws error on api call fail', async () => { @@ -313,8 +309,7 @@ describe('getters', () => { 222, 50, 70, - new Metric(1, 1, 1, 0, 1), - new Metric(2, 1, 1, 0, 1), + new SatelliteScores('', 100, 200, 300), testJoinAt, ); diff --git a/web/storagenode/tests/unit/store/notifications.spec.ts b/web/storagenode/tests/unit/store/notifications.spec.ts index 6d968004a..f3b0a5d3f 100644 --- a/web/storagenode/tests/unit/store/notifications.spec.ts +++ b/web/storagenode/tests/unit/store/notifications.spec.ts @@ -35,7 +35,7 @@ describe('mutations', () => { createLocalVue().use(Vuex); notifications = [ new UINotification(new Notification('1', '1', NotificationTypes.Disqualification, 'title1', 'message1', null)), - new UINotification(new Notification('2', '1', NotificationTypes.UptimeCheckFailure, 'title2', 'message2', null)), + new UINotification(new Notification('2', '1', NotificationTypes.AuditCheckFailure, 'title2', 'message2', null)), ]; }); @@ -75,7 +75,7 @@ describe('actions', () => { jest.resetAllMocks(); notifications = [ new UINotification(new Notification('1', '1', NotificationTypes.Disqualification, 
'title1', 'message1', null)), - new UINotification(new Notification('2', '1', NotificationTypes.UptimeCheckFailure, 'title2', 'message2', null)), + new UINotification(new Notification('2', '1', NotificationTypes.AuditCheckFailure, 'title2', 'message2', null)), ]; }); From 8a3db08f68d135cff724f4592c6f43750e7cd22e Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Mon, 1 Feb 2021 13:18:47 +0200 Subject: [PATCH 36/38] storagenode/payouts/estimatedpayouts: fix calculations Change-Id: Iaa01ebd06a32c19d3ddc46b52524020e51212a7b --- .../estimatedpayouts/estimatedpayouts.go | 12 +++++-- .../estimatedpayouts/estimatedpayouts_test.go | 35 +++++++++++++------ 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/storagenode/payouts/estimatedpayouts/estimatedpayouts.go b/storagenode/payouts/estimatedpayouts/estimatedpayouts.go index 7862577a0..685fd99f1 100644 --- a/storagenode/payouts/estimatedpayouts/estimatedpayouts.go +++ b/storagenode/payouts/estimatedpayouts/estimatedpayouts.go @@ -67,7 +67,13 @@ func RoundFloat(value float64) float64 { // SetExpectedMonth set current month expectations. 
func (estimatedPayout *EstimatedPayout) SetExpectedMonth(now time.Time) { - daysPaste := float64(now.Day() - 1) - timeInMonth := date.UTCEndOfMonth(now) - estimatedPayout.CurrentMonthExpectations = (estimatedPayout.CurrentMonth.Payout / daysPaste) * float64(timeInMonth.Day()) + daysPast := float64(now.Day()) - 1 + if daysPast < 1 { + daysPast = 1 + } + + daysPerMonth := float64(date.UTCEndOfMonth(now).Day()) + payoutPerDay := estimatedPayout.CurrentMonth.Payout / daysPast + + estimatedPayout.CurrentMonthExpectations = payoutPerDay * daysPerMonth } diff --git a/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go b/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go index f81cc8fd9..5dcae105a 100644 --- a/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go +++ b/storagenode/payouts/estimatedpayouts/estimatedpayouts_test.go @@ -4,6 +4,7 @@ package estimatedpayouts_test import ( + "math" "testing" "time" @@ -19,19 +20,31 @@ func TestCurrentMonthExpectations(t *testing.T) { StorageNodeCount: 1, SatelliteCount: 2, }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) { - estimatedPayout := estimatedpayouts.EstimatedPayout{ - CurrentMonth: estimatedpayouts.PayoutMonthly{ - Payout: 100, - }, + const payout = 100.0 + + type test struct { + time time.Time + expected float64 + } + tests := []test{ + // 28 days in month + {time.Date(2021, 2, 1, 16, 0, 0, 0, time.UTC), 2800.00}, + {time.Date(2021, 2, 28, 10, 0, 0, 0, time.UTC), 103.70}, + // 31 days in month + {time.Date(2021, 3, 1, 19, 0, 0, 0, time.UTC), 3100.0}, + {time.Date(2021, 3, 31, 21, 0, 0, 0, time.UTC), 103.33}, } - currentDay := time.Now().Day() - 1 - now := time.Now().UTC() - y, m, _ := now.Date() - daysInMonth := time.Date(y, m+1, 1, 0, 0, 0, -1, &time.Location{}).Day() + for _, test := range tests { + estimates := estimatedpayouts.EstimatedPayout{ + CurrentMonth: estimatedpayouts.PayoutMonthly{ + Payout: payout, + }, + } - expectations := 
(estimatedPayout.CurrentMonth.Payout / float64(currentDay)) * float64(daysInMonth) - estimatedPayout.SetExpectedMonth(now) - require.Equal(t, estimatedPayout.CurrentMonthExpectations, expectations) + estimates.SetExpectedMonth(test.time) + require.False(t, math.IsNaN(estimates.CurrentMonthExpectations)) + require.InDelta(t, test.expected, estimates.CurrentMonthExpectations, 0.01) + } }) } From ee1f67bb1874daae0772468719049741a7e20298 Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Mon, 1 Feb 2021 13:09:01 +0200 Subject: [PATCH 37/38] go.mod: bump quic to fix Go 1.16rc1 Change-Id: Ief21af5302c36a91d19755e58a2e36fae61cd15b --- go.mod | 4 ++-- go.sum | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 0616c9616..95257f37d 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/jackc/pgtype v1.5.0 github.com/jackc/pgx/v4 v4.9.0 github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 - github.com/lucas-clemente/quic-go v0.7.1-0.20210125162258-7456e643b956 + github.com/lucas-clemente/quic-go v0.7.1-0.20210131023823-622ca23d4eb4 github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 @@ -39,7 +39,7 @@ require ( go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e google.golang.org/api v0.20.0 // indirect diff --git a/go.sum b/go.sum index 1b3c441f3..1909c376e 100644 --- a/go.sum +++ b/go.sum @@ -372,8 +372,8 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 
h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lucas-clemente/quic-go v0.7.1-0.20210125162258-7456e643b956 h1:/AxIsj5sEh+ELGhBilVugx+iCdD9G1Ym0O1i7kUSKzA= -github.com/lucas-clemente/quic-go v0.7.1-0.20210125162258-7456e643b956/go.mod h1:Cxx5SWK/K2dp7TA7qsnIHgF43e3NAufUDcSb9jTpL68= +github.com/lucas-clemente/quic-go v0.7.1-0.20210131023823-622ca23d4eb4 h1:gsTNebTJiHCgCfVptaRMMLAHZSMcPkpvCx+vAHJrwx8= +github.com/lucas-clemente/quic-go v0.7.1-0.20210131023823-622ca23d4eb4/go.mod h1:RqK5iyJgjjGJRLSfhBm2ZhdRDDllcN/QqNT1EvQ7ZNg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -384,8 +384,8 @@ github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl5 github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-16 v0.1.0-beta.1.1 h1:CWVWoLCcdfarQRGgWi2b9ILKhc5v8MXtfs3bz9dmE00= -github.com/marten-seemann/qtls-go1-16 v0.1.0-beta.1.1/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-16 v0.1.0-rc.1 h1:JCvEgXNTQjxa+vxOx5c8e84iRttJvyt+7Jo7GLgR7KI= +github.com/marten-seemann/qtls-go1-16 v0.1.0-rc.1/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -768,6 +768,8 @@ 
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201231184435-2d18734c6014/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc h1:y0Og6AYdwus7SIAnKnDxjc4gJetRiYEWOx4AKbOeyEI=
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

From 5d895fb4044d56ca273bd1a0112e20c0b79fa72b Mon Sep 17 00:00:00 2001
From: Egon Elbre
Date: Mon, 1 Feb 2021 13:56:19 +0200
Subject: [PATCH 38/38] storagenode/{payouts,console}: use same time for all
 calculations

When calling time.Now() multiple times, the calls can cross a month
boundary, causing errors in calculations.
Change-Id: I66b5be7598f3bf475b4b5fe0dcce82eee55b3134 --- storagenode/console/consoleapi/storagenode.go | 7 +++++-- .../console/consoleapi/storagenode_test.go | 2 +- storagenode/console/service.go | 8 ++++---- .../payouts/estimatedpayouts/service.go | 18 ++++++++---------- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/storagenode/console/consoleapi/storagenode.go b/storagenode/console/consoleapi/storagenode.go index 64834bf20..b4dcb6137 100644 --- a/storagenode/console/consoleapi/storagenode.go +++ b/storagenode/console/consoleapi/storagenode.go @@ -6,6 +6,7 @@ package consoleapi import ( "encoding/json" "net/http" + "time" "github.com/gorilla/mux" "github.com/zeebo/errs" @@ -119,10 +120,12 @@ func (dashboard *StorageNode) EstimatedPayout(w http.ResponseWriter, r *http.Req w.Header().Set(contentType, applicationJSON) + now := time.Now() + queryParams := r.URL.Query() id := queryParams.Get("id") if id == "" { - data, err := dashboard.service.GetAllSatellitesEstimatedPayout(ctx) + data, err := dashboard.service.GetAllSatellitesEstimatedPayout(ctx, now) if err != nil { dashboard.serveJSONError(w, http.StatusInternalServerError, ErrStorageNodeAPI.Wrap(err)) return @@ -139,7 +142,7 @@ func (dashboard *StorageNode) EstimatedPayout(w http.ResponseWriter, r *http.Req return } - data, err := dashboard.service.GetSatelliteEstimatedPayout(ctx, satelliteID) + data, err := dashboard.service.GetSatelliteEstimatedPayout(ctx, satelliteID, now) if err != nil { dashboard.serveJSONError(w, http.StatusInternalServerError, ErrStorageNodeAPI.Wrap(err)) return diff --git a/storagenode/console/consoleapi/storagenode_test.go b/storagenode/console/consoleapi/storagenode_test.go index f019866a0..fbef41ebc 100644 --- a/storagenode/console/consoleapi/storagenode_test.go +++ b/storagenode/console/consoleapi/storagenode_test.go @@ -115,7 +115,7 @@ func TestStorageNodeApi(t *testing.T) { body, err := ioutil.ReadAll(res.Body) require.NoError(t, err) - estimation, err := 
sno.Console.Service.GetAllSatellitesEstimatedPayout(ctx) + estimation, err := sno.Console.Service.GetAllSatellitesEstimatedPayout(ctx, time.Now()) require.NoError(t, err) expected, err := json.Marshal(estimatedpayouts.EstimatedPayout{ CurrentMonth: estimation.CurrentMonth, diff --git a/storagenode/console/service.go b/storagenode/console/service.go index cd6e44a63..bf36b1a01 100644 --- a/storagenode/console/service.go +++ b/storagenode/console/service.go @@ -429,8 +429,8 @@ func (s *Service) GetAllSatellitesData(ctx context.Context) (_ *Satellites, err } // GetSatelliteEstimatedPayout returns estimated payouts for current and previous months for selected satellite. -func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) { - estimatedPayout, err = s.estimation.GetSatelliteEstimatedPayout(ctx, satelliteID) +func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID, now time.Time) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) { + estimatedPayout, err = s.estimation.GetSatelliteEstimatedPayout(ctx, satelliteID, now) if err != nil { return estimatedpayouts.EstimatedPayout{}, SNOServiceErr.Wrap(err) } @@ -439,8 +439,8 @@ func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID s } // GetAllSatellitesEstimatedPayout returns estimated payouts for current and previous months for all satellites. 
-func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) { - estimatedPayout, err = s.estimation.GetAllSatellitesEstimatedPayout(ctx) +func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context, now time.Time) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) { + estimatedPayout, err = s.estimation.GetAllSatellitesEstimatedPayout(ctx, now) if err != nil { return estimatedpayouts.EstimatedPayout{}, SNOServiceErr.Wrap(err) } diff --git a/storagenode/payouts/estimatedpayouts/service.go b/storagenode/payouts/estimatedpayouts/service.go index 530c0c24c..4394b0c66 100644 --- a/storagenode/payouts/estimatedpayouts/service.go +++ b/storagenode/payouts/estimatedpayouts/service.go @@ -53,11 +53,10 @@ func NewService(bandwidthDB bandwidth.DB, reputationDB reputation.DB, storageUsa } // GetSatelliteEstimatedPayout returns estimated payouts for current and previous months from specific satellite with current level of load. 
-func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID) (payout EstimatedPayout, err error) { +func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID, now time.Time) (payout EstimatedPayout, err error) { defer mon.Task()(&ctx)(&err) - now := time.Now() - currentMonthPayout, previousMonthPayout, err := s.estimatedPayout(ctx, satelliteID) + currentMonthPayout, previousMonthPayout, err := s.estimatedPayout(ctx, satelliteID, now) if err != nil { return EstimatedPayout{}, EstimationServiceErr.Wrap(err) } @@ -70,7 +69,7 @@ func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID s return EstimatedPayout{}, EstimationServiceErr.Wrap(err) } - daysSinceJoined := time.Since(stats.JoinedAt).Hours() / 24 + daysSinceJoined := stats.JoinedAt.Sub(now).Hours() / 24 if daysSinceJoined >= float64(now.Day()) { payout.SetExpectedMonth(now) @@ -82,13 +81,12 @@ func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID s } // GetAllSatellitesEstimatedPayout returns estimated payouts for current and previous months from all satellites with current level of load. -func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout EstimatedPayout, err error) { +func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context, now time.Time) (payout EstimatedPayout, err error) { defer mon.Task()(&ctx)(&err) - now := time.Now() satelliteIDs := s.trust.GetSatellites(ctx) for i := 0; i < len(satelliteIDs); i++ { - current, previous, err := s.estimatedPayout(ctx, satelliteIDs[i]) + current, previous, err := s.estimatedPayout(ctx, satelliteIDs[i], now) if err != nil { return EstimatedPayout{}, EstimationServiceErr.Wrap(err) } @@ -117,7 +115,7 @@ func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout E } // estimatedPayout returns estimated payouts data for current and previous months from specific satellite. 
-func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID) (currentMonthPayout PayoutMonthly, previousMonthPayout PayoutMonthly, err error) { +func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID, now time.Time) (currentMonthPayout PayoutMonthly, previousMonthPayout PayoutMonthly, err error) { defer mon.Task()(&ctx)(&err) priceModel, err := s.pricingDB.Get(ctx, satelliteID) @@ -130,8 +128,8 @@ func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID) return PayoutMonthly{}, PayoutMonthly{}, EstimationServiceErr.Wrap(err) } - currentMonthPayout, err = s.estimationUsagePeriod(ctx, time.Now().UTC(), stats.JoinedAt, priceModel) - previousMonthPayout, err = s.estimationUsagePeriod(ctx, time.Now().UTC().AddDate(0, -1, 0), stats.JoinedAt, priceModel) + currentMonthPayout, err = s.estimationUsagePeriod(ctx, now.UTC(), stats.JoinedAt, priceModel) + previousMonthPayout, err = s.estimationUsagePeriod(ctx, now.UTC().AddDate(0, -1, 0), stats.JoinedAt, priceModel) return currentMonthPayout, previousMonthPayout, nil }