commit d0612199f0

Merge remote-tracking branch 'origin/main' into multipart-upload

Conflicts:
    go.mod
    go.sum
    satellite/metainfo/config.go
    satellite/metainfo/metainfo_test.go

Change-Id: I95cf3c1d020a7918795b5eec63f36112fdb86749
.clabot | 4

@@ -66,6 +66,8 @@
     "dominickmarino",
     "hectorj2f",
     "nergdron",
-    "Doom4535"
+    "Doom4535",
+    "harrymaurya05",
+    "gregoirevda"
     ]
 }
Jenkinsfile (vendored) | 4

@@ -32,7 +32,7 @@ node('node') {
     sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
     // fetch the remote main branch
     sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
-    sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6'
+    sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.7'
   }
   catch(err){
     throw err
@@ -67,7 +67,7 @@ node('node') {
     sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
     // fetch the remote main branch
     sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
-    sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6'
+    sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.7'
   }
   catch(err){
     throw err
@@ -57,6 +57,7 @@ pipeline {
     sh 'golangci-lint --config /go/ci/.golangci.yml -j=2 run'
     sh 'check-mod-tidy -mod .build/go.mod.orig'
     sh 'make check-monitoring'
+    sh 'make test-wasm-size'
   }
 }

@@ -197,6 +198,16 @@ pipeline {
   }
 }

+stage('wasm npm') {
+  steps {
+    dir(".build") {
+      sh 'cp -r ../satellite/console/wasm/tests/ .'
+      sh 'cd tests && cp "$(go env GOROOT)/misc/wasm/wasm_exec.js" .'
+      sh 'cd tests && npm install && npm run test'
+    }
+  }
+}
+
 stage('storagenode npm') {
   steps {
     dir("web/storagenode") {
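Note: the 'wasm npm' stage above copies Go's wasm_exec.js shim from GOROOT next to the JavaScript tests before running them under npm. A minimal sketch of the kind of Go-to-WebAssembly export such tests exercise (the "add" function here is a hypothetical illustration, not part of satellite/console/wasm):

// Build with: GOOS=js GOARCH=wasm go build -o main.wasm
// Load from JS via the wasm_exec.js shim copied from $(go env GOROOT)/misc/wasm,
// as the pipeline stage above does.
package main

import "syscall/js"

func main() {
	// Expose a Go function to the JavaScript side.
	js.Global().Set("add", js.FuncOf(func(this js.Value, args []js.Value) interface{} {
		return args[0].Int() + args[1].Int()
	}))
	// Keep the Go runtime alive so the exported function stays callable.
	select {}
}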
Makefile | 7

@@ -1,4 +1,4 @@
-GO_VERSION ?= 1.15.6
+GO_VERSION ?= 1.15.7
 GOOS ?= linux
 GOARCH ?= amd64
 GOPATH ?= $(shell go env GOPATH)
@@ -129,6 +129,11 @@ check-monitoring: ## Check for locked monkit calls that have changed
 	|| (echo "Locked monkit metrics have been changed. Notify #data-science and run \`go run github.com/storj/ci/check-monitoring -out monkit.lock ./...\` to update monkit.lock file." \
 	&& exit 1)

+.PHONY: test-wasm-size
+test-wasm-size: ## Test that the built .wasm code has not increased in size
+	@echo "Running ${@}"
+	@./scripts/test-wasm-size.sh
+
 ##@ Build

 .PHONY: storagenode-console

@@ -33,14 +33,13 @@ All of our code for Storj v3 is open source. Have a code change you think would

 Have comments or bug reports? Want to propose a PR before hand-crafting it? Jump on to our [forum](https://forum.storj.io) and join the [Engineering Discussions](https://forum.storj.io/c/engineer-amas) to say hi to the developer community and to talk to the Storj core team.

-Want to vote on or suggest new features? Post it on [ideas.storj.io](https://ideas.storj.io).
+Want to vote on or suggest new features? Post it on the [forum](https://forum.storj.io/c/parent-cat/5).

 ### Issue tracking and roadmap

 See the breakdown of what we're building by checking out the following resources:

 * [White paper](https://storj.io/whitepaper)
-* [Aha! Roadmap](https://storjlabs.aha.io/published/bc0db77dc0580bb10c0faf2b383d0529?page=1)

 ### Install required packages

@@ -12,6 +12,7 @@ import (
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"

+	"storj.io/storj/private/currency"
 	"storj.io/storj/satellite/compensation"
 	"storj.io/storj/satellite/satellitedb"
 )
@@ -76,11 +77,6 @@ func generateInvoicesCSV(ctx context.Context, period compensation.Period, out io
 		}
 	}

-	paidYTD, err := db.Compensation().QueryPaidInYear(ctx, usage.NodeID, period.Year)
-	if err != nil {
-		return err
-	}
-
 	nodeInfo := compensation.NodeInfo{
 		ID:        usage.NodeID,
 		CreatedAt: node.CreatedAt,
@@ -103,7 +99,7 @@ func generateInvoicesCSV(ctx context.Context, period compensation.Period, out io
 		NodeWallet:  node.Operator.Wallet,
 		NodeAddress: nodeAddress,
 		NodeLastIP:  nodeLastIP,
-		PaidYTD:     paidYTD,
+		PaidYTD:     currency.Zero, // deprecated
 	}

 	if err := invoice.MergeNodeInfo(nodeInfo); err != nil {
@@ -51,10 +51,11 @@ const (
 	// to create a port with a consistent format for storj-sim services.

 	// Peer classes.
-	satellitePeer      = 0
-	gatewayPeer        = 1
-	versioncontrolPeer = 2
-	storagenodePeer    = 3
+	satellitePeer       = 0
+	satellitePeerWorker = 4
+	gatewayPeer         = 1
+	versioncontrolPeer  = 2
+	storagenodePeer     = 3

 	// Endpoints.
 	publicRPC = 0
@@ -64,12 +65,15 @@ const (
 	debugHTTP = 9

 	// Satellite specific constants.
-	redisPort         = 4
-	adminHTTP         = 5
-	debugAdminHTTP    = 6
-	debugPeerHTTP     = 7
-	debugRepairerHTTP = 8
-	debugGCHTTP       = 10
+	redisPort      = 4
+	adminHTTP      = 5
+	debugAdminHTTP = 6
+	debugCoreHTTP  = 7
+
+	// Satellite worker specific constants.
+	debugMigrationHTTP = 0
+	debugRepairerHTTP  = 1
+	debugGCHTTP        = 2
 )

 // port creates a port with a consistent format for storj-sim services.
@@ -394,7 +398,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
 	migrationProcess.Arguments = withCommon(apiProcess.Directory, Arguments{
 		"run": {
 			"migration",
-			"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)),
+			"--debug.addr", net.JoinHostPort(host, port(satellitePeerWorker, i, debugMigrationHTTP)),
 		},
 	})
 	apiProcess.WaitForExited(migrationProcess)
@@ -407,7 +411,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
 	})
 	coreProcess.Arguments = withCommon(apiProcess.Directory, Arguments{
 		"run": {
-			"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)),
+			"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugCoreHTTP)),
 			"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
 		},
 	})
@@ -435,7 +439,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
 	repairProcess.Arguments = withCommon(apiProcess.Directory, Arguments{
 		"run": {
 			"repair",
-			"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugRepairerHTTP)),
+			"--debug.addr", net.JoinHostPort(host, port(satellitePeerWorker, i, debugRepairerHTTP)),
 			"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
 		},
 	})
@@ -449,7 +453,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
 	garbageCollectionProcess.Arguments = withCommon(apiProcess.Directory, Arguments{
 		"run": {
 			"garbage-collection",
-			"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugGCHTTP)),
+			"--debug.addr", net.JoinHostPort(host, port(satellitePeerWorker, i, debugGCHTTP)),
 		},
 	})
 	garbageCollectionProcess.WaitForExited(migrationProcess)
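Note: the new satellitePeerWorker class gives the migration, repair, and garbage-collection workers their own debug-port namespace, so they no longer collide with the satellite core's ports. A minimal sketch of the port scheme these constants feed, assuming storj-sim composes ports as 10000 + class*1000 + instance*10 + endpoint (the base and digit layout are assumptions; the actual formula lives in cmd/storj-sim/network.go):

package main

import (
	"fmt"
	"strconv"
)

const (
	satellitePeer       = 0
	satellitePeerWorker = 4
	debugCoreHTTP       = 7 // satellite peer endpoint
	debugRepairerHTTP   = 1 // satellite worker endpoint
)

// port composes a stable port per (peer class, instance, endpoint).
// The 10000 base and digit layout are assumed, not copied from the repo.
func port(peerclass, index, endpoint int) string {
	return strconv.Itoa(10000 + peerclass*1000 + index*10 + endpoint)
}

func main() {
	fmt.Println(port(satellitePeer, 0, debugCoreHTTP))           // 10007
	fmt.Println(port(satellitePeerWorker, 0, debugRepairerHTTP)) // 14001
}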
@@ -9,12 +9,14 @@ import (
 	"path/filepath"

 	"github.com/spf13/cobra"
+	"github.com/zeebo/errs"

 	"storj.io/private/cfgstruct"
 	"storj.io/private/process"
 	"storj.io/storj/cmd/internal/wizard"
 	"storj.io/uplink"
 	"storj.io/uplink/backcomp"
+	"storj.io/uplink/private/access2"
 )

 var (
@@ -108,7 +110,12 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) {
 		access, err = backcomp.RequestAccessWithPassphraseAndConcurrency(ctx, uplinkConfig, satelliteAddress, apiKeyString, passphrase, uint8(setupCfg.PBKDFConcurrency))
 	}
 	if err != nil {
-		return Error.Wrap(err)
+		_, err2 := access2.ParseAccess(apiKeyString)
+		if err2 == nil {
+			err2 = Error.New("API key appears to be an access grant: try running `uplink import` instead")
+		}
+
+		return errs.Combine(err, err2)
 	}
 	accessData, err := access.Serialize()
 	if err != nil {
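Note: the setup change above layers a diagnosis on top of the original failure instead of replacing it, using errs.Combine from github.com/zeebo/errs, which drops nil errors. A minimal standalone sketch of that pattern under assumed names (classify stands in for access2.ParseAccess; none of this is the uplink code itself):

package main

import (
	"fmt"

	"github.com/zeebo/errs"
)

// classify is a stand-in for access2.ParseAccess: it reports whether the
// input already looks like an access grant rather than a raw API key.
func classify(input string) error {
	if len(input) > 40 { // hypothetical heuristic for the sketch
		return nil // parses as an access grant
	}
	return errs.New("not an access grant")
}

func requestAccess(apiKey string) error {
	// Suppose the primary operation failed:
	err := errs.New("invalid API key format")

	// Attach a hint explaining the likely user mistake, without losing err.
	var hint error
	if classify(apiKey) == nil {
		hint = errs.New("input appears to be an access grant: try `uplink import` instead")
	}
	// Combine drops nil errors, so the hint is attached only when it applies.
	return errs.Combine(err, hint)
}

func main() {
	fmt.Println(requestAccess("averyverylongstringthatlookslikeanaccessgrant.."))
}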
go.mod | 17

@@ -12,23 +12,21 @@ require (
 	github.com/go-redis/redis v6.15.9+incompatible
 	github.com/gogo/protobuf v1.3.1
 	github.com/golang-migrate/migrate/v4 v4.7.0
-	github.com/golang/mock v1.4.4 // indirect
 	github.com/google/go-cmp v0.5.2
-	github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 // indirect
 	github.com/gorilla/mux v1.8.0
 	github.com/gorilla/schema v1.2.0
 	github.com/graphql-go/graphql v0.7.9
-	github.com/jackc/pgconn v1.7.0
+	github.com/jackc/pgconn v1.8.0
 	github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451
-	github.com/jackc/pgtype v1.5.0
-	github.com/jackc/pgx/v4 v4.9.0
-	github.com/jinzhu/now v1.1.1
+	github.com/jackc/pgtype v1.6.2
+	github.com/jackc/pgx/v4 v4.10.1
 	github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3
 	github.com/loov/hrtime v1.0.3
+	github.com/lucas-clemente/quic-go v0.7.1-0.20210131023823-622ca23d4eb4
 	github.com/mattn/go-sqlite3 v2.0.3+incompatible
 	github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce
 	github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
 	github.com/onsi/ginkgo v1.14.0 // indirect
 	github.com/shopspring/decimal v1.2.0
 	github.com/spacemonkeygo/monkit/v3 v3.0.7
 	github.com/spf13/cobra v1.0.0
@@ -42,16 +40,15 @@ require (
 	go.etcd.io/bbolt v1.3.5
 	go.uber.org/zap v1.16.0
 	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
-	golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect
 	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
-	golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc
+	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
 	golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
 	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
 	google.golang.org/api v0.20.0 // indirect
 	google.golang.org/protobuf v1.25.0 // indirect
-	storj.io/common v0.0.0-20210115161819-ee11aaf35a7f
+	storj.io/common v0.0.0-20210119231202-8321551aa24d
 	storj.io/drpc v0.0.16
 	storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
-	storj.io/private v0.0.0-20210108233641-2ba1ef686d1f
+	storj.io/private v0.0.0-20210120150301-bd3ac3e989f0
 	storj.io/uplink v1.4.6-0.20210115090500-10cfa3d1c277
 )
go.sum | 157

@@ -1,5 +1,7 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
@@ -17,7 +19,12 @@ cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
@@ -40,6 +47,7 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZp
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.13.3 h1:kohgdtN58KW/r9ZDVmMJE3MrfbumwsDQStd0LPAGmmw=
github.com/alicebob/miniredis/v2 v2.13.3/go.mod h1:uS970Sw5Gs9/iK3yBg0l9Uj9s25wXxSpQUE9EaJ/Blg=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -55,6 +63,7 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@@ -66,12 +75,15 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/calebcase/tmpfile v1.0.2 h1:1AGuhKiUu4J6wxz6lxuF6ck3f8G2kaV6KSEny0RGCig=
github.com/calebcase/tmpfile v1.0.2/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE=
github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -90,6 +102,7 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@@ -123,6 +136,7 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
@@ -132,12 +146,17 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -161,17 +180,19 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -203,6 +224,9 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 h1:SRgJV+IoxM5MKyFdlSUeNy6/ycRUF2yBAKdAQswoHUk=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -219,8 +243,10 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34=
github.com/graphql-go/graphql v0.7.9/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -246,7 +272,6 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
@@ -263,8 +288,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU
github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
github.com/jackc/pgconn v1.7.0 h1:pwjzcYyfmz/HQOQlENvG1OcDqauTGaqlVahq934F0/U=
github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
github.com/jackc/pgconn v1.8.0 h1:FmjZ0rOyXTr1wfWs45i4a9vjnjWUAGpMuQLD9OSs+lw=
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 h1:WAvSpGf7MsFuzAtK4Vk7R4EVe+liW4x83r4oWu0WHKw=
github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
@@ -280,8 +305,8 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.0.5 h1:NUbEWPmCQZbMmYlTjVoNPhc0CfnYyz2bfUAh6A5ZVJM=
github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8=
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
@@ -291,8 +316,8 @@ github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrU
github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
github.com/jackc/pgtype v1.5.0 h1:jzBqRk2HFG2CV4AIwgCI2PwTgm6UUoCAK2ofHHRirtc=
github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgtype v1.6.2 h1:b3pDeuhbbzBYcg5kwNmNDun4pFUD/0AAr1kLXZLeNt8=
github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
github.com/jackc/pgx v3.2.0+incompatible h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY=
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
@@ -301,16 +326,15 @@ github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQ
github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
github.com/jackc/pgx/v4 v4.9.0 h1:6STjDqppM2ROy5p1wNDcsC7zJTjSHeuCsguZmXyzx7c=
github.com/jackc/pgx/v4 v4.9.0/go.mod h1:MNGWmViCgqbZck9ujOOBN63gK9XVGILXWCvKLGKmnms=
github.com/jackc/pgx/v4 v4.10.1 h1:/6Q3ye4myIj6AaplUm+eRcz4OhK9HAvFf4ePsG40LJY=
github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
@@ -337,6 +361,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -348,9 +373,20 @@ github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/loov/hrtime v1.0.3 h1:LiWKU3B9skJwRPUf0Urs9+0+OE3TxdMuiRPOTwR0gcU=
github.com/loov/hrtime v1.0.3/go.mod h1:yDY3Pwv2izeY4sq7YcPX/dtLwzg5NU1AxWuWxKwd0p0=
github.com/lucas-clemente/quic-go v0.7.1-0.20210131023823-622ca23d4eb4 h1:gsTNebTJiHCgCfVptaRMMLAHZSMcPkpvCx+vAHJrwx8=
github.com/lucas-clemente/quic-go v0.7.1-0.20210131023823-622ca23d4eb4/go.mod h1:RqK5iyJgjjGJRLSfhBm2ZhdRDDllcN/QqNT1EvQ7ZNg=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ=
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/marten-seemann/qtls-go1-16 v0.1.0-rc.1 h1:JCvEgXNTQjxa+vxOx5c8e84iRttJvyt+7Jo7GLgR7KI=
github.com/marten-seemann/qtls-go1-16 v0.1.0-rc.1/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -368,9 +404,11 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -388,6 +426,8 @@ github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8d
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758=
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs=
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 h1:lh3PyZvY+B9nFliSGTn5uFuqQQJGuNrD0MLCokv09ag=
@@ -397,13 +437,11 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -411,6 +449,7 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
@@ -423,6 +462,7 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -430,9 +470,11 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -443,15 +485,39 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@@ -461,10 +527,11 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 h1:WcQDknqg0qajLNYKv3mXgbkWlYs5rPgZehGJFWePHVI=
github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monkit/v3 v3.0.7 h1:LsGdIXl8mccqJrYEh4Uf4sLVGu/g0tjhNqQzdn9MzVk=
github.com/spacemonkeygo/monkit/v3 v3.0.7/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
@@ -489,7 +556,6 @@ github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -501,9 +567,12 @@ github.com/stripe/stripe-go v70.15.0+incompatible h1:hNML7M1zx8RgtepEMlxyu/FpVPr
github.com/stripe/stripe-go v70.15.0+incompatible/go.mod h1:A1dQZmO/QypXmsL0T8axYZkSN/uA/T/A64pfKdBAMiY=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
@@ -535,6 +604,7 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -542,26 +612,27 @@ go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -572,6 +643,7 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -586,6 +658,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1Zcpyg
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -604,6 +677,8 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -612,6 +687,7 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -621,18 +697,20 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -647,6 +725,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -655,6 +734,7 @@ golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -679,15 +759,19 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc h1:y0Og6AYdwus7SIAnKnDxjc4gJetRiYEWOx4AKbOeyEI=
|
||||
golang.org/x/sys v0.0.0-20201231184435-2d18734c6014/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
@ -695,6 +779,7 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@ -726,6 +811,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
@ -737,6 +825,7 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
|
||||
google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@ -744,6 +833,10 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
@ -753,10 +846,11 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
@ -764,7 +858,6 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
@ -774,7 +867,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
@ -784,7 +876,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
@ -796,7 +887,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@ -804,6 +894,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@ -812,18 +903,22 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
|
||||
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
|
||||
storj.io/common v0.0.0-20210113135631-07a5dc68dc1c/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
|
||||
storj.io/common v0.0.0-20210115161819-ee11aaf35a7f h1:VmLstwTDGwrbn/jrdQ33fotRiOI4q8Swjl7W9Mt7qdY=
|
||||
storj.io/common v0.0.0-20210115161819-ee11aaf35a7f/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
|
||||
storj.io/common v0.0.0-20210119231202-8321551aa24d h1:lOLCRtsKISuZlK2lBI5O0uBAc44mp/yO3CtUTXNNSUc=
|
||||
storj.io/common v0.0.0-20210119231202-8321551aa24d/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
|
||||
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
|
||||
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
|
||||
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
|
||||
storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo=
|
||||
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BXzT2UYnYCBLqNi6i84Y8QIPUs=
|
||||
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
|
||||
storj.io/private v0.0.0-20210108233641-2ba1ef686d1f h1:ctEwD9AsWR8MGv+hKxATjsu114lOPuL2wL7fqO2qusg=
|
||||
storj.io/private v0.0.0-20210108233641-2ba1ef686d1f/go.mod h1:3KcGiA7phL3a0HUCe5ar90SlIU3iFb8hKInaEZQ5P7o=
|
||||
storj.io/private v0.0.0-20210120150301-bd3ac3e989f0 h1:y8GkOxqdj4fXkksn076nQxcVAaGR+4MhJmFb0q2yqsM=
|
||||
storj.io/private v0.0.0-20210120150301-bd3ac3e989f0/go.mod h1:VHaDkpBka3Pp5rXqFSDHbEmzMaFFW4BYrXJfGIN1Udo=
|
||||
storj.io/uplink v1.4.6-0.20210115090500-10cfa3d1c277 h1:H+YVCCYBgk3xRda52MwQO3svpQvqE6P/bjccVLyvROs=
|
||||
storj.io/uplink v1.4.6-0.20210115090500-10cfa3d1c277/go.mod h1:lpQO2Smf6gpOl7hkB/IEKdrMMdbIHeRF1QKhyZCoD5w=
|
||||
|
@ -5,6 +5,7 @@ storj.io/storj/satellite/accounting/tally."bucket_objects" IntVal
storj.io/storj/satellite/accounting/tally."bucket_remote_bytes" IntVal
storj.io/storj/satellite/accounting/tally."bucket_remote_segments" IntVal
storj.io/storj/satellite/accounting/tally."bucket_segments" IntVal
storj.io/storj/satellite/accounting/tally."nodetallies.totalsum" IntVal
storj.io/storj/satellite/accounting/tally."total_bytes" IntVal
storj.io/storj/satellite/accounting/tally."total_inline_bytes" IntVal
storj.io/storj/satellite/accounting/tally."total_inline_segments" IntVal
@ -111,7 +112,6 @@ storj.io/storj/satellite/satellitedb."audit_online_score" FloatVal
storj.io/storj/satellite/satellitedb."audit_reputation_alpha" FloatVal
storj.io/storj/satellite/satellitedb."audit_reputation_beta" FloatVal
storj.io/storj/satellite/satellitedb."bad_audit_dqs" Meter
storj.io/storj/satellite/satellitedb."nodetallies.totalsum" IntVal
storj.io/storj/satellite/satellitedb."offline_dqs" Meter
storj.io/storj/satellite/satellitedb."unknown_audit_reputation_alpha" FloatVal
storj.io/storj/satellite/satellitedb."unknown_audit_reputation_beta" FloatVal
@ -133,32 +133,15 @@ func (controller *Nodes) Get(w http.ResponseWriter, r *http.Request) {
	node, err := controller.service.Get(ctx, nodeID)
	if err != nil {
		controller.log.Error("get node not found error", zap.Error(err))
		controller.serveError(w, http.StatusNotFound, ErrNodes.Wrap(err))
		return
	}

	if err = json.NewEncoder(w).Encode(node); err != nil {
		controller.log.Error("failed to write json response", zap.Error(err))
		return
	}
}

// List handles retrieving a list of nodes.
func (controller *Nodes) List(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	var err error
	defer mon.Task()(&ctx)(&err)

	w.Header().Add("Content-Type", "application/json")

	list, err := controller.service.List(ctx)
	if err != nil {
		controller.log.Error("list nodes internal error", zap.Error(err))
		if nodes.ErrNoNode.Has(err) {
			controller.serveError(w, http.StatusNotFound, ErrNodes.Wrap(err))
			return
		}
		controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err))
		return
	}

	if err = json.NewEncoder(w).Encode(list); err != nil {
	if err = json.NewEncoder(w).Encode(node); err != nil {
		controller.log.Error("failed to write json response", zap.Error(err))
		return
	}
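serveError is referenced above, but its body is outside this hunk. A minimal hypothetical stand-in, consistent with the call sites (status code plus a wrapped error), might look like the sketch below; the response field name and function shape are assumptions, not part of this change.

package sketch

import (
	"encoding/json"
	"net/http"

	"go.uber.org/zap"
)

// serveError is a hypothetical stand-in for the controller method used above;
// the real implementation lives elsewhere in the controllers package and its
// response shape is not shown in this diff.
func serveError(log *zap.Logger, w http.ResponseWriter, status int, err error) {
	w.WriteHeader(status)
	if encodeErr := json.NewEncoder(w).Encode(map[string]string{"error": err.Error()}); encodeErr != nil {
		log.Error("failed to write json error response", zap.Error(encodeErr))
	}
}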
@ -5,8 +5,10 @@ package server

import (
	"context"
	"html/template"
	"net"
	"net/http"
	"path/filepath"

	"github.com/gorilla/mux"
	"github.com/zeebo/errs"
@ -39,6 +41,8 @@ type Server struct {

	listener net.Listener
	http     http.Server

	index *template.Template
}

// NewServer returns a new instance of the Multinode Dashboard http server.
@ -51,13 +55,14 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne
	}

	router := mux.NewRouter()
	fs := http.FileServer(http.Dir(server.config.StaticDir))

	apiRouter := router.PathPrefix("/api/v0").Subrouter()
	apiRouter.NotFoundHandler = controllers.NewNotFound(server.log)

	nodesController := controllers.NewNodes(server.log, server.nodes)
	nodesRouter := apiRouter.PathPrefix("/nodes").Subrouter()
	nodesRouter.HandleFunc("", nodesController.Add).Methods(http.MethodPost)
	nodesRouter.HandleFunc("", nodesController.List).Methods(http.MethodGet)
	nodesRouter.HandleFunc("/infos", nodesController.ListInfos).Methods(http.MethodGet)
	nodesRouter.HandleFunc("/infos/{satelliteID}", nodesController.ListInfosSatellite).Methods(http.MethodGet)
	nodesRouter.HandleFunc("/trusted-satellites", nodesController.TrustedSatellites).Methods(http.MethodGet)
@ -65,6 +70,11 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne
	nodesRouter.HandleFunc("/{id}", nodesController.UpdateName).Methods(http.MethodPatch)
	nodesRouter.HandleFunc("/{id}", nodesController.Delete).Methods(http.MethodDelete)

	if server.config.StaticDir != "" {
		router.PathPrefix("/static/").Handler(http.StripPrefix("/static", fs))
		router.PathPrefix("/").HandlerFunc(server.appHandler)
	}

	server.http = http.Server{
		Handler: router,
	}
@ -72,10 +82,33 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne
	return &server, nil
}

// appHandler is the web app http handler function.
func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
	header := w.Header()

	header.Set("Content-Type", "text/html; charset=UTF-8")
	header.Set("X-Content-Type-Options", "nosniff")
	header.Set("Referrer-Policy", "same-origin")

	if server.index == nil {
		server.log.Error("index template is not set")
		return
	}

	if err := server.index.Execute(w, nil); err != nil {
		server.log.Error("index template could not be executed", zap.Error(Error.Wrap(err)))
		return
	}
}

// Run starts the server that hosts the webapp and api endpoints.
func (server *Server) Run(ctx context.Context) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	err = server.initializeTemplates()
	if err != nil {
		return Error.Wrap(err)
	}

	ctx, cancel := context.WithCancel(ctx)
	var group errgroup.Group

	group.Go(func() error {
@ -94,3 +127,13 @@ func (server *Server) Run(ctx context.Context) (err error) {
func (server *Server) Close() error {
	return Error.Wrap(server.http.Close())
}

// initializeTemplates is used to initialize all templates.
func (server *Server) initializeTemplates() (err error) {
	server.index, err = template.ParseFiles(filepath.Join(server.config.StaticDir, "dist", "index.html"))
	if err != nil {
		server.log.Error("dist folder is not generated. use 'npm run build' command", zap.Error(err))
	}

	return err
}
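A compact restatement of the routing NewServer sets up, as a hedged sketch: /static/ is served from StaticDir, and every other path falls through to appHandler, which renders the parsed dist/index.html so client-side routing can take over. gorilla/mux matches routes in registration order, so the catch-all must come last. Names here are illustrative only.

package sketch

import (
	"net/http"

	"github.com/gorilla/mux"
)

// newAppRouter mirrors the wiring above: static assets first, SPA fallback last.
func newAppRouter(staticDir string, app http.HandlerFunc) *mux.Router {
	router := mux.NewRouter()
	fs := http.FileServer(http.Dir(staticDir))
	router.PathPrefix("/static/").Handler(http.StripPrefix("/static", fs))
	router.PathPrefix("/").HandlerFunc(app) // SPA fallback: always serve index.html
	return router
}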
@ -33,32 +33,33 @@ var ErrNoNode = errs.Class("no such node")

// Node is a representation of a storage node that an SNO can add to the Multinode Dashboard.
type Node struct {
	ID storj.NodeID
	ID storj.NodeID `json:"id"`
	// APISecret is a secret issued by the storagenode that will be the main auth mechanism in the MND <-> SNO api.
	APISecret []byte
	PublicAddress string
	Name string
	APISecret     []byte `json:"apiSecret"`
	PublicAddress string `json:"publicAddress"`
	Name          string `json:"name"`
}

// NodeInfo contains basic node internal state.
type NodeInfo struct {
	ID storj.NodeID
	Name string
	Version string
	LastContact time.Time
	DiskSpaceUsed int64
	DiskSpaceLeft int64
	BandwidthUsed int64
	TotalEarned int64
	ID            storj.NodeID `json:"id"`
	Name          string       `json:"name"`
	Version       string       `json:"version"`
	LastContact   time.Time    `json:"lastContact"`
	DiskSpaceUsed int64        `json:"diskSpaceUsed"`
	DiskSpaceLeft int64        `json:"diskSpaceLeft"`
	BandwidthUsed int64        `json:"bandwidthUsed"`
	TotalEarned   int64        `json:"totalEarned"`
}

// NodeInfoSatellite contains satellite specific node internal state.
type NodeInfoSatellite struct {
	ID storj.NodeID
	Name string
	Version string
	LastContact time.Time
	OnlineScore float64
	AuditScore float64
	SuspensionScore float64
	ID              storj.NodeID `json:"id"`
	Name            string       `json:"name"`
	Version         string       `json:"version"`
	LastContact     time.Time    `json:"lastContact"`
	OnlineScore     float64      `json:"onlineScore"`
	AuditScore      float64      `json:"auditScore"`
	SuspensionScore float64      `json:"suspensionScore"`
	TotalEarned     int64        `json:"totalEarned"`
}
@ -66,18 +66,6 @@ func (service *Service) Get(ctx context.Context, id storj.NodeID) (_ Node, err e

}

// List retrieves a list of all added nodes.
func (service *Service) List(ctx context.Context) (_ []Node, err error) {
	defer mon.Task()(&ctx)(&err)

	nodes, err := service.nodes.List(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	return nodes, nil
}

// Remove removes a node from the system.
func (service *Service) Remove(ctx context.Context, id storj.NodeID) (err error) {
	defer mon.Task()(&ctx)(&err)
@ -90,6 +78,9 @@ func (service *Service) ListInfos(ctx context.Context) (_ []NodeInfo, err error)

	nodes, err := service.nodes.List(ctx)
	if err != nil {
		if ErrNoNode.Has(err) {
			return []NodeInfo{}, nil
		}
		return nil, Error.Wrap(err)
	}

@ -172,6 +163,9 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor

	nodes, err := service.nodes.List(ctx)
	if err != nil {
		if ErrNoNode.Has(err) {
			return []NodeInfoSatellite{}, nil
		}
		return nil, Error.Wrap(err)
	}

@ -191,6 +185,7 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor
	}()

	nodeClient := multinodepb.NewDRPCNodeClient(conn)
	payoutClient := multinodepb.NewDRPCPayoutClient(conn)

	header := &multinodepb.RequestHeader{
		ApiKey: node.APISecret,
@ -214,6 +209,11 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor
		return NodeInfoSatellite{}, Error.Wrap(err)
	}

	earned, err := payoutClient.Earned(ctx, &multinodepb.EarnedRequest{Header: header})
	if err != nil {
		return NodeInfoSatellite{}, Error.Wrap(err)
	}

	return NodeInfoSatellite{
		ID:   node.ID,
		Name: node.Name,
@ -222,6 +222,7 @@ func (service *Service) ListInfosSatellite(ctx context.Context, satelliteID stor
		OnlineScore:     rep.Online.Score,
		AuditScore:      rep.Audit.Score,
		SuspensionScore: rep.Audit.SuspensionScore,
		TotalEarned:     earned.Total,
	}, nil
}()
if err != nil {
16 pkg/quic/common.go Normal file
@ -0,0 +1,16 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package quic

import (
	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
)

var (
	mon = monkit.Package()

	// Error is a pkg/quic error.
	Error = errs.Class("quic error")
)
197 pkg/quic/conn.go Normal file
@ -0,0 +1,197 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package quic

import (
	"context"
	"crypto/tls"
	"net"
	"runtime"
	"sync"
	"time"

	"github.com/lucas-clemente/quic-go"

	"storj.io/common/memory"
	"storj.io/common/rpc"
	"storj.io/storj/pkg/quic/qtls"
)

// Conn is a wrapper around a quic connection and fulfills the net.Conn interface.
type Conn struct {
	once sync.Once
	// The Conn.stream variable should never be directly accessed.
	// Always use Conn.getStream() instead.
	stream quic.Stream

	acceptErr error
	session   quic.Session
}

// Read implements the Conn Read method.
func (c *Conn) Read(b []byte) (n int, err error) {
	stream, err := c.getStream()
	if err != nil {
		return 0, err
	}
	return stream.Read(b)
}

// Write implements the Conn Write method.
func (c *Conn) Write(b []byte) (int, error) {
	stream, err := c.getStream()
	if err != nil {
		return 0, err
	}
	return stream.Write(b)
}

func (c *Conn) getStream() (quic.Stream, error) {
	// For outgoing connections, `stream` is set when the Conn is initialized.
	// It's only with incoming connections that `stream == nil` and this
	// AcceptStream() code happens.
	if c.stream == nil {
		// When this function completes, it guarantees either c.acceptErr is not nil or c.stream is not nil.
		c.once.Do(func() {
			stream, err := c.session.AcceptStream(context.Background())
			if err != nil {
				c.acceptErr = err
				return
			}

			c.stream = stream
		})
		if c.acceptErr != nil {
			return nil, c.acceptErr
		}
	}

	return c.stream, nil
}

// ConnectionState converts the quic session state to a tls connection state and returns it.
func (c *Conn) ConnectionState() tls.ConnectionState {
	return qtls.ToTLSConnectionState(c.session.ConnectionState())
}

// Close closes the quic connection.
func (c *Conn) Close() error {
	return c.session.CloseWithError(quic.ErrorCode(0), "")
}

// LocalAddr returns the local address.
func (c *Conn) LocalAddr() net.Addr {
	return c.session.LocalAddr()
}

// RemoteAddr returns the address of the peer.
func (c *Conn) RemoteAddr() net.Addr {
	return c.session.RemoteAddr()
}

// SetReadDeadline sets the deadline for future Read calls
// and any currently-blocked Read call.
func (c *Conn) SetReadDeadline(t time.Time) error {
	stream, err := c.getStream()
	if err != nil {
		return err
	}
	return stream.SetReadDeadline(t)
}

// SetWriteDeadline sets the deadline for future Write calls
// and any currently-blocked Write call.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	stream, err := c.getStream()
	if err != nil {
		return err
	}
	return stream.SetWriteDeadline(t)
}

// SetDeadline sets the read and write deadlines associated
// with the connection. It is equivalent to calling both
// SetReadDeadline and SetWriteDeadline.
func (c *Conn) SetDeadline(t time.Time) error {
	stream, err := c.getStream()
	if err != nil {
		return err
	}

	return stream.SetDeadline(t)
}
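The getStream logic above is worth isolating: the first caller performs AcceptStream under a sync.Once, and afterwards every caller observes either the stream or the accept error, and the accept is never retried. A self-contained sketch of that pattern, with acceptFn standing in for session.AcceptStream:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// lazyStream mirrors Conn.getStream: initialize once, then serve the cached
// value or the cached error to every subsequent caller.
type lazyStream struct {
	once      sync.Once
	stream    string
	acceptErr error
	acceptFn  func() (string, error) // stand-in for session.AcceptStream
}

func (l *lazyStream) get() (string, error) {
	if l.stream == "" {
		l.once.Do(func() {
			l.stream, l.acceptErr = l.acceptFn()
		})
		if l.acceptErr != nil {
			return "", l.acceptErr
		}
	}
	return l.stream, nil
}

func main() {
	ok := lazyStream{acceptFn: func() (string, error) { return "stream-0", nil }}
	fmt.Println(ok.get())

	bad := lazyStream{acceptFn: func() (string, error) { return "", errors.New("accept failed") }}
	fmt.Println(bad.get())
	fmt.Println(bad.get()) // same error again: acceptFn is not retried
}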
//
// timed conns
//

// timedConn wraps a rpc.ConnectorConn so that all reads and writes get the specified timeout and
// return bytes no faster than the rate. If the timeout or rate are zero, they are
// ignored.
type timedConn struct {
	rpc.ConnectorConn
	rate memory.Size
}

// now returns time.Now if there's a nonzero rate.
func (t *timedConn) now() (now time.Time) {
	if t.rate > 0 {
		now = time.Now()
	}
	return now
}

// delay ensures that we sleep to keep the rate if it is nonzero. n is the number of
// bytes in the read or write operation we need to delay.
func (t *timedConn) delay(start time.Time, n int) {
	if t.rate > 0 {
		expected := time.Duration(n * int(time.Second) / t.rate.Int())
		if actual := time.Since(start); expected > actual {
			time.Sleep(expected - actual)
		}
	}
}

// Read wraps the connection read and adds sleeping to ensure the rate.
func (t *timedConn) Read(p []byte) (int, error) {
	start := t.now()
	n, err := t.ConnectorConn.Read(p)
	t.delay(start, n)
	return n, err
}

// Write wraps the connection write and adds sleeping to ensure the rate.
func (t *timedConn) Write(p []byte) (int, error) {
	start := t.now()
	n, err := t.ConnectorConn.Write(p)
	t.delay(start, n)
	return n, err
}
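The delay math works out to n/rate seconds per operation: any remainder beyond the time the I/O already took is slept off. A worked example using the same expression as timedConn.delay:

package main

import (
	"fmt"
	"time"
)

// expectedDelay mirrors the expression in timedConn.delay: n bytes at
// rate bytes/second should take at least n/rate seconds.
func expectedDelay(n, rate int) time.Duration {
	return time.Duration(n * int(time.Second) / rate)
}

func main() {
	fmt.Println(expectedDelay(1<<20, 1<<20))   // 1s: one MiB at one MiB/s
	fmt.Println(expectedDelay(512<<10, 1<<20)) // 500ms: half the bytes, half the time
}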
// closeTrackingConn wraps a rpc.ConnectorConn and keeps track of if it was closed
// or if it was leaked (and closes it if it was leaked).
type closeTrackingConn struct {
	rpc.ConnectorConn
}

// TrackClose wraps the conn and sets a finalizer on the returned value to
// close the conn and monitor that it was leaked.
func TrackClose(conn rpc.ConnectorConn) rpc.ConnectorConn {
	tracked := &closeTrackingConn{ConnectorConn: conn}
	runtime.SetFinalizer(tracked, (*closeTrackingConn).finalize)
	return tracked
}

// Close clears the finalizer and closes the connection.
func (c *closeTrackingConn) Close() error {
	runtime.SetFinalizer(c, nil)
	mon.Event("quic_connection_closed")
	return c.ConnectorConn.Close()
}

// finalize monitors that a connection was leaked and closes the connection.
func (c *closeTrackingConn) finalize() {
	mon.Event("quic_connection_leaked")
	_ = c.ConnectorConn.Close()
}
78 pkg/quic/connector.go Normal file
@ -0,0 +1,78 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package quic

import (
	"context"
	"crypto/tls"
	"time"

	"github.com/lucas-clemente/quic-go"

	"storj.io/common/memory"
	"storj.io/common/peertls/tlsopts"
	"storj.io/common/rpc"
)

// Connector implements a dialer that creates a quic connection.
type Connector struct {
	transferRate memory.Size

	config *quic.Config
}

// NewDefaultConnector instantiates a new instance of Connector.
// If no quic configuration is provided, default values will be used.
func NewDefaultConnector(quicConfig *quic.Config) Connector {
	if quicConfig == nil {
		quicConfig = &quic.Config{
			MaxIdleTimeout: 15 * time.Minute,
		}
	}
	return Connector{
		config: quicConfig,
	}
}

// DialContext creates a quic connection.
func (c Connector) DialContext(ctx context.Context, tlsConfig *tls.Config, address string) (_ rpc.ConnectorConn, err error) {
	defer mon.Task()(&ctx)(&err)

	if tlsConfig == nil {
		return nil, Error.New("tls config is not set")
	}
	tlsConfigCopy := tlsConfig.Clone()
	tlsConfigCopy.NextProtos = []string{tlsopts.StorjApplicationProtocol}

	sess, err := quic.DialAddrContext(ctx, address, tlsConfigCopy, c.config)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	stream, err := sess.OpenStreamSync(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	conn := &Conn{
		session: sess,
		stream:  stream,
	}

	return &timedConn{
		ConnectorConn: TrackClose(conn),
		rate:          c.transferRate,
	}, nil
}

// SetTransferRate returns a QUIC connector with the given transfer rate.
func (c Connector) SetTransferRate(rate memory.Size) Connector {
	c.transferRate = rate
	return c
}

// TransferRate returns the transfer rate set on the connector.
func (c Connector) TransferRate() memory.Size {
	return c.transferRate
}
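Usage as exercised by the dialer tests later in this change: take a default dialer and swap its Connector for the QUIC one. This sketch assumes the (*rpc.Conn, error) signature of DialNodeURL from storj.io/common/rpc; passing nil keeps the 15-minute MaxIdleTimeout default.

package quicdial

import (
	"context"

	"storj.io/common/peertls/tlsopts"
	"storj.io/common/rpc"
	"storj.io/common/storj"
	"storj.io/storj/pkg/quic"
)

// dialOverQUIC mirrors the test setup below in this change.
func dialOverQUIC(ctx context.Context, tlsOptions *tlsopts.Options, target storj.NodeURL) (*rpc.Conn, error) {
	dialer := rpc.NewDefaultDialer(tlsOptions)
	dialer.Connector = quic.NewDefaultConnector(nil) // nil config => default 15m idle timeout
	return dialer.DialNodeURL(ctx, target)
}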
62 pkg/quic/listener.go Normal file
@ -0,0 +1,62 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package quic

import (
	"context"
	"crypto/tls"
	"net"

	"github.com/lucas-clemente/quic-go"

	"storj.io/common/peertls/tlsopts"
)

// Listener implements a listener for QUIC.
type Listener struct {
	listener quic.Listener
}

// NewListener returns a new listener instance for QUIC.
// The quic.Config may be nil, in which case the default values will be used.
// If the provided context is closed, all existing or following Accept calls will return an error.
func NewListener(tlsConfig *tls.Config, address string, quicConfig *quic.Config) (net.Listener, error) {
	if tlsConfig == nil {
		return nil, Error.New("tls config is not set")
	}
	tlsConfigCopy := tlsConfig.Clone()
	tlsConfigCopy.NextProtos = []string{tlsopts.StorjApplicationProtocol}

	listener, err := quic.ListenAddr(address, tlsConfigCopy, quicConfig)
	if err != nil {
		return nil, err
	}

	return &Listener{
		listener: listener,
	}, nil
}

// Accept waits for and returns the next available quic session to the listener.
func (l *Listener) Accept() (net.Conn, error) {
	ctx := context.Background()
	session, err := l.listener.Accept(ctx)
	if err != nil {
		return nil, err
	}

	return &Conn{
		session: session,
	}, nil
}

// Close closes the QUIC listener.
func (l *Listener) Close() error {
	return l.listener.Close()
}

// Addr returns the local network addr that the server is listening on.
func (l *Listener) Addr() net.Addr {
	return l.listener.Addr()
}
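How server.New (later in this change) builds its QUIC listener, shown standalone as a sketch; quicgo aliases github.com/lucas-clemente/quic-go, and the 60-second idle timeout mirrors the defaultUserTimeout constant that the server package reuses here.

package quicserve

import (
	"crypto/tls"
	"net"
	"time"

	quicgo "github.com/lucas-clemente/quic-go"

	"storj.io/storj/pkg/quic"
)

// newPublicQUICListener binds a QUIC (UDP) listener on the same host:port that
// a TCP listener already claimed, matching the pattern used by server.New.
func newPublicQUICListener(tlsConfig *tls.Config, tcpAddr net.Addr) (net.Listener, error) {
	return quic.NewListener(tlsConfig, tcpAddr.String(), &quicgo.Config{MaxIdleTimeout: 60 * time.Second})
}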
29 pkg/quic/qtls/go114.go Normal file
@ -0,0 +1,29 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

// +build !go1.15

package qtls

import (
	"crypto/tls"

	quicgo "github.com/lucas-clemente/quic-go"
)

// ToTLSConnectionState converts a quic-go connection state to tls connection
// state.
func ToTLSConnectionState(state quicgo.ConnectionState) tls.ConnectionState {
	return tls.ConnectionState{
		Version:                     state.TLS.Version,
		HandshakeComplete:           state.TLS.HandshakeComplete,
		DidResume:                   state.TLS.DidResume,
		CipherSuite:                 state.TLS.CipherSuite,
		NegotiatedProtocol:          state.TLS.NegotiatedProtocol,
		ServerName:                  state.TLS.ServerName,
		PeerCertificates:            state.TLS.PeerCertificates,
		VerifiedChains:              state.TLS.VerifiedChains,
		SignedCertificateTimestamps: state.TLS.SignedCertificateTimestamps,
		OCSPResponse:                state.TLS.OCSPResponse,
	}
}
18 pkg/quic/qtls/go115.go Normal file
@ -0,0 +1,18 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

// +build go1.15

package qtls

import (
	"crypto/tls"

	quicgo "github.com/lucas-clemente/quic-go"
)

// ToTLSConnectionState converts a quic-go connection state to tls connection
// state.
func ToTLSConnectionState(state quicgo.ConnectionState) tls.ConnectionState {
	return state.TLS.ConnectionState
}
@ -10,6 +10,8 @@ import (
	"github.com/zeebo/errs"

	"storj.io/common/netutil"
	"storj.io/common/rpc"
	"storj.io/storj/pkg/quic"
)

// defaultUserTimeout is the value we use for the TCP_USER_TIMEOUT setting.
@ -19,24 +21,27 @@ const defaultUserTimeout = 60 * time.Second
// and monitors if the returned connections are closed or leaked.
func wrapListener(lis net.Listener) net.Listener {
	if lis, ok := lis.(*net.TCPListener); ok {
		return newUserTimeoutListener(lis)
		return newTCPUserTimeoutListener(lis)
	}
	if lis, ok := lis.(*quic.Listener); ok {
		return newQUICTrackedListener(lis)
	}
	return lis
}

// userTimeoutListener wraps a tcp listener so that it sets the TCP_USER_TIMEOUT
// tcpUserTimeoutListener wraps a tcp listener so that it sets the TCP_USER_TIMEOUT
// value for each socket it returns.
type userTimeoutListener struct {
type tcpUserTimeoutListener struct {
	lis *net.TCPListener
}

// newUserTimeoutListener wraps the tcp listener in a userTimeoutListener.
func newUserTimeoutListener(lis *net.TCPListener) *userTimeoutListener {
	return &userTimeoutListener{lis: lis}
// newTCPUserTimeoutListener wraps the tcp listener in a tcpUserTimeoutListener.
func newTCPUserTimeoutListener(lis *net.TCPListener) *tcpUserTimeoutListener {
	return &tcpUserTimeoutListener{lis: lis}
}

// Accept waits for and returns the next connection to the listener.
func (lis *userTimeoutListener) Accept() (net.Conn, error) {
func (lis *tcpUserTimeoutListener) Accept() (net.Conn, error) {
	conn, err := lis.lis.AcceptTCP()
	if err != nil {
		return nil, err
@ -50,11 +55,44 @@ func (lis *userTimeoutListener) Accept() (net.Conn, error) {

// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (lis *userTimeoutListener) Close() error {
func (lis *tcpUserTimeoutListener) Close() error {
	return lis.lis.Close()
}

// Addr returns the listener's network address.
func (lis *userTimeoutListener) Addr() net.Addr {
func (lis *tcpUserTimeoutListener) Addr() net.Addr {
	return lis.lis.Addr()
}

type quicTrackedListener struct {
	lis *quic.Listener
}

func newQUICTrackedListener(lis *quic.Listener) *quicTrackedListener {
	return &quicTrackedListener{lis: lis}
}

func (lis *quicTrackedListener) Accept() (net.Conn, error) {
	conn, err := lis.lis.Accept()
	if err != nil {
		return nil, err
	}

	connectorConn, ok := conn.(rpc.ConnectorConn)
	if !ok {
		return nil, Error.New("quic connection doesn't implement required methods")
	}

	return quic.TrackClose(connectorConn), nil
}

// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (lis *quicTrackedListener) Close() error {
	return lis.lis.Close()
}

// Addr returns the listener's network address.
func (lis *quicTrackedListener) Addr() net.Addr {
	return lis.lis.Addr()
}
@ -6,9 +6,14 @@ package server
import (
	"context"
	"crypto/tls"
	"errors"
	"net"
	"os"
	"runtime"
	"sync"
	"syscall"

	quicgo "github.com/lucas-clemente/quic-go"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
@ -21,6 +26,7 @@ import (
	"storj.io/drpc/drpcserver"
	jaeger "storj.io/monkit-jaeger"
	"storj.io/storj/pkg/listenmux"
	"storj.io/storj/pkg/quic"
)

// Config holds server specific configuration parameters.
@ -33,9 +39,10 @@ type Config struct {
}

type public struct {
	listener net.Listener
	drpc *drpcserver.Server
	mux *drpcmux.Mux
	tcpListener  net.Listener
	quicListener net.Listener
	drpc         *drpcserver.Server
	mux          *drpcmux.Mux
}

type private struct {
@ -71,22 +78,44 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s
	Manager: rpc.NewDefaultManagerOptions(),
}

	publicListener, err := net.Listen("tcp", publicAddr)
	if err != nil {
		return nil, err
	var err error
	var publicTCPListener, publicQUICListener net.Listener
	for retry := 0; ; retry++ {
		publicTCPListener, err = net.Listen("tcp", publicAddr)
		if err != nil {
			return nil, err
		}

		publicQUICListener, err = quic.NewListener(tlsOptions.ServerTLSConfig(), publicTCPListener.Addr().String(), &quicgo.Config{MaxIdleTimeout: defaultUserTimeout})
		if err != nil {
			_, port, _ := net.SplitHostPort(publicAddr)
			if port == "0" && retry < 10 && isErrorAddressAlreadyInUse(err) {
				// From here, we know for sure that the TCP port chosen by the
				// OS is available, but we don't know if the same port number
				// for UDP is also available.
				// If a UDP port is already in use, we close the TCP port and retry
				// to find one that is available for both UDP and TCP.
				_ = publicTCPListener.Close()
				continue
			}
			return nil, errs.Combine(err, publicTCPListener.Close())
		}

		break
	}

	publicMux := drpcmux.New()
	publicTracingHandler := rpctracing.NewHandler(publicMux, jaeger.RemoteTraceHandler)
	server.public = public{
		listener: wrapListener(publicListener),
		drpc: drpcserver.NewWithOptions(publicTracingHandler, serverOptions),
		mux: publicMux,
		tcpListener:  wrapListener(publicTCPListener),
		quicListener: wrapListener(publicQUICListener),
		drpc:         drpcserver.NewWithOptions(publicTracingHandler, serverOptions),
		mux:          publicMux,
	}

	privateListener, err := net.Listen("tcp", privateAddr)
	if err != nil {
		return nil, errs.Combine(err, publicListener.Close())
		return nil, errs.Combine(err, publicTCPListener.Close(), publicQUICListener.Close())
	}
	privateMux := drpcmux.New()
	privateTracingHandler := rpctracing.NewHandler(privateMux, jaeger.RemoteTraceHandler)
@ -103,7 +132,7 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s
func (p *Server) Identity() *identity.FullIdentity { return p.tlsOptions.Ident }

// Addr returns the server's public listener address.
func (p *Server) Addr() net.Addr { return p.public.listener.Addr() }
func (p *Server) Addr() net.Addr { return p.public.tcpListener.Addr() }

// PrivateAddr returns the server's private listener address.
func (p *Server) PrivateAddr() net.Addr { return p.private.listener.Addr() }
@ -127,7 +156,8 @@ func (p *Server) Close() error {
	// We ignore these errors because there's not really anything to do
	// even if they happen, and they'll just be errors due to duplicate
	// closes anyway.
	_ = p.public.listener.Close()
	_ = p.public.quicListener.Close()
	_ = p.public.tcpListener.Close()
	_ = p.private.listener.Close()
	return nil
}
@ -156,7 +186,7 @@ func (p *Server) Run(ctx context.Context) (err error) {
	// a chance to be notified that they're done running.
	const drpcHeader = "DRPC!!!1"

	publicMux := listenmux.New(p.public.listener, len(drpcHeader))
	publicMux := listenmux.New(p.public.tcpListener, len(drpcHeader))
	publicDRPCListener := tls.NewListener(publicMux.Route(drpcHeader), p.tlsOptions.ServerTLSConfig())

	privateMux := listenmux.New(p.private.listener, len(drpcHeader))
@ -197,6 +227,10 @@ func (p *Server) Run(ctx context.Context) (err error) {
		defer cancel()
		return p.public.drpc.Serve(ctx, publicDRPCListener)
	})
	group.Go(func() error {
		defer cancel()
		return p.public.drpc.Serve(ctx, p.public.quicListener)
	})
	group.Go(func() error {
		defer cancel()
		return p.private.drpc.Serve(ctx, privateDRPCListener)
@ -209,3 +243,24 @@ func (p *Server) Run(ctx context.Context) (err error) {
	muxCancel()
	return errs.Combine(err, muxGroup.Wait())
}

// isErrorAddressAlreadyInUse checks whether the error corresponds to
// EADDRINUSE. Taken from https://stackoverflow.com/a/65865898.
func isErrorAddressAlreadyInUse(err error) bool {
	var eOsSyscall *os.SyscallError
	if !errors.As(err, &eOsSyscall) {
		return false
	}
	var errErrno syscall.Errno
	if !errors.As(eOsSyscall.Err, &errErrno) {
		return false
	}
	if errErrno == syscall.EADDRINUSE {
		return true
	}
	const WSAEADDRINUSE = 10048
	if runtime.GOOS == "windows" && errErrno == WSAEADDRINUSE {
		return true
	}
	return false
}
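A quick standalone probe of the error class this helper detects: binding UDP twice on the same address surfaces EADDRINUSE through an *os.SyscallError, which is exactly the chain isErrorAddressAlreadyInUse unwraps with errors.As. (Behavior may vary across platforms and socket options such as SO_REUSEPORT.)

package main

import (
	"fmt"
	"net"
)

func main() {
	first, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer func() { _ = first.Close() }()

	// Second bind on the identical address: the OS typically rejects it with EADDRINUSE.
	_, err = net.ListenPacket("udp", first.LocalAddr().String())
	fmt.Println(err) // e.g. "listen udp 127.0.0.1:PORT: bind: address already in use"
}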
@ -51,3 +51,10 @@ func MonthsBetweenDates(from time.Time, to time.Time) int {
func TruncateToHourInNano(t time.Time) int64 {
	return t.Truncate(1 * time.Hour).UnixNano()
}

// UTCEndOfMonth returns the UTC end of the month (e.g. to get the last day of the month).
func UTCEndOfMonth(now time.Time) time.Time {
	now = now.UTC()
	y, m, _ := now.Date()
	return time.Date(y, m+1, 1, 0, 0, 0, 0, &time.Location{}).Add(-time.Nanosecond)
}
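A worked example of the end-of-month arithmetic: time.Date normalizes m+1, so December rolls over into January of the next year, and subtracting one nanosecond from the first instant of the next month lands on the last representable instant of the current one.

package main

import (
	"fmt"
	"time"
)

// UTCEndOfMonth is copied verbatim from the change above for a runnable demo.
func UTCEndOfMonth(now time.Time) time.Time {
	now = now.UTC()
	y, m, _ := now.Date()
	return time.Date(y, m+1, 1, 0, 0, 0, 0, &time.Location{}).Add(-time.Nanosecond)
}

func main() {
	feb := time.Date(2021, time.February, 10, 12, 0, 0, 0, time.UTC)
	fmt.Println(UTCEndOfMonth(feb)) // 2021-02-28 23:59:59.999999999 +0000 UTC (2021 is not a leap year)

	dec := time.Date(2020, time.December, 31, 0, 0, 0, 0, time.UTC)
	fmt.Println(UTCEndOfMonth(dec)) // 2020-12-31 23:59:59.999999999 +0000 UTC (m+1 rolls into January 2021)
}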
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
"storj.io/common/rpc"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/testcontext"
|
||||
"storj.io/storj/pkg/quic"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite"
|
||||
"storj.io/storj/storagenode"
|
||||
@ -43,82 +44,99 @@ func TestDialNodeURL(t *testing.T) {
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dialer := rpc.NewDefaultDialer(tlsOptions)
|
||||
tcpDialer := rpc.NewDefaultDialer(tlsOptions)
|
||||
quicDialer := rpc.NewDefaultDialer(tlsOptions)
|
||||
quicDialer.Connector = quic.NewDefaultConnector(nil)
|
||||
|
||||
 	unsignedClientOpts, err := tlsopts.NewOptions(unsignedIdent, tlsopts.Config{
 		PeerIDVersions: "*",
 	}, nil)
 	require.NoError(t, err)

-	unsignedDialer := rpc.NewDefaultDialer(unsignedClientOpts)
+	unsignedTCPDialer := rpc.NewDefaultDialer(unsignedClientOpts)
+	unsignedQUICDialer := rpc.NewDefaultDialer(unsignedClientOpts)
+	unsignedQUICDialer.Connector = quic.NewDefaultConnector(nil)

-	t.Run("DialNodeURL with invalid targets", func(t *testing.T) {
-		targets := []storj.NodeURL{
-			{
-				ID:      storj.NodeID{},
-				Address: "",
-			},
-			{
-				ID:      storj.NodeID{123},
-				Address: "127.0.0.1:100",
-			},
-			{
-				ID:      storj.NodeID{},
-				Address: planet.StorageNodes[1].Addr(),
-			},
-		}
+	test := func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, dialer rpc.Dialer, unsignedDialer rpc.Dialer) {
+		t.Run("DialNodeURL with invalid targets", func(t *testing.T) {
+			targets := []storj.NodeURL{
+				{
+					ID:      storj.NodeID{},
+					Address: "",
+				},
+				{
+					ID:      storj.NodeID{123},
+					Address: "127.0.0.1:100",
+				},
+				{
+					ID:      storj.NodeID{},
+					Address: planet.StorageNodes[1].Addr(),
+				},
+			}

-		for _, target := range targets {
-			tag := fmt.Sprintf("%+v", target)
+			for _, target := range targets {
+				tag := fmt.Sprintf("%+v", target)

 				timedCtx, cancel := context.WithTimeout(ctx, time.Second)
 				conn, err := dialer.DialNodeURL(timedCtx, target)
 				cancel()
 				assert.Error(t, err, tag)
 				assert.Nil(t, conn, tag)
 			}
 		})

+		t.Run("DialNode with valid signed target", func(t *testing.T) {
+			timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-			conn, err := dialer.DialNodeURL(timedCtx, target)
+			conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
 			cancel()
-			assert.Error(t, err, tag)
-			assert.Nil(t, conn, tag)
-		}

+			assert.NoError(t, err)
+			require.NotNil(t, conn)

+			assert.NoError(t, conn.Close())
+		})

+		t.Run("DialNode with unsigned identity", func(t *testing.T) {
+			timedCtx, cancel := context.WithTimeout(ctx, time.Second)
+			conn, err := unsignedDialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
+			cancel()

+			assert.NotNil(t, conn)
+			require.NoError(t, err)
+			assert.NoError(t, conn.Close())
+		})

+		t.Run("DialAddress with unsigned identity", func(t *testing.T) {
+			timedCtx, cancel := context.WithTimeout(ctx, time.Second)
+			conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
+			cancel()

+			assert.NotNil(t, conn)
+			require.NoError(t, err)
+			assert.NoError(t, conn.Close())
+		})

+		t.Run("DialAddress with valid address", func(t *testing.T) {
+			timedCtx, cancel := context.WithTimeout(ctx, time.Second)
+			conn, err := dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
+			cancel()

+			assert.NoError(t, err)
+			require.NotNil(t, conn)
+			assert.NoError(t, conn.Close())
+		})

+	}

+	// test with tcp
+	t.Run("TCP", func(t *testing.T) {
+		test(t, ctx, planet, tcpDialer, unsignedTCPDialer)
+	})
+	// test with quic
+	t.Run("QUIC", func(t *testing.T) {
+		test(t, ctx, planet, quicDialer, unsignedQUICDialer)
+	})

-	t.Run("DialNode with valid signed target", func(t *testing.T) {
-		timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-		conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
-		cancel()

-		assert.NoError(t, err)
-		require.NotNil(t, conn)

-		assert.NoError(t, conn.Close())
-	})

-	t.Run("DialNode with unsigned identity", func(t *testing.T) {
-		timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-		conn, err := unsignedDialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
-		cancel()

-		assert.NotNil(t, conn)
-		require.NoError(t, err)
-		assert.NoError(t, conn.Close())
-	})

-	t.Run("DialAddress with unsigned identity", func(t *testing.T) {
-		timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-		conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
-		cancel()

-		assert.NotNil(t, conn)
-		require.NoError(t, err)
-		assert.NoError(t, conn.Close())
-	})

-	t.Run("DialAddress with valid address", func(t *testing.T) {
-		timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-		conn, err := dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
-		cancel()

-		assert.NoError(t, err)
-		require.NotNil(t, conn)
-		assert.NoError(t, conn.Close())
-	})
 	})
 }
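The hunk above folds the previously top-level subtests into one `test` closure and runs it once per transport. A minimal sketch of that pattern, with the dialer wiring assumed:

test := func(t *testing.T, dialer rpc.Dialer) {
	// ... shared assertions against the given dialer ...
}
t.Run("TCP", func(t *testing.T) { test(t, tcpDialer) })
t.Run("QUIC", func(t *testing.T) { test(t, quicDialer) })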
@@ -150,27 +168,40 @@ func TestDialNode_BadServerCertificate(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)

-	dialer := rpc.NewDefaultDialer(tlsOptions)
+	tcpDialer := rpc.NewDefaultDialer(tlsOptions)
+	quicDialer := rpc.NewDefaultDialer(tlsOptions)
+	quicDialer.Connector = quic.NewDefaultConnector(nil)

-	t.Run("DialNodeURL with bad server certificate", func(t *testing.T) {
-		timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-		conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
-		cancel()
+	test := func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, dialer rpc.Dialer) {
+		t.Run("DialNodeURL with bad server certificate", func(t *testing.T) {
+			timedCtx, cancel := context.WithTimeout(ctx, time.Second)
+			conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
+			cancel()

-		tag := fmt.Sprintf("%+v", planet.StorageNodes[1].NodeURL())
-		assert.Nil(t, conn, tag)
-		require.Error(t, err, tag)
-		assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
+			tag := fmt.Sprintf("%+v", planet.StorageNodes[1].NodeURL())
+			assert.Nil(t, conn, tag)
+			require.Error(t, err, tag)
+			assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
 		})

+		t.Run("DialAddress with bad server certificate", func(t *testing.T) {
+			timedCtx, cancel := context.WithTimeout(ctx, time.Second)
+			conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
+			cancel()

+			assert.Nil(t, conn)
+			require.Error(t, err)
+			assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
+		})
+	}

+	// test with tcp
+	t.Run("TCP", func(t *testing.T) {
+		test(t, ctx, planet, tcpDialer)
+	})

-	t.Run("DialAddress with bad server certificate", func(t *testing.T) {
-		timedCtx, cancel := context.WithTimeout(ctx, time.Second)
-		conn, err := dialer.DialNodeURL(timedCtx, planet.StorageNodes[1].NodeURL())
-		cancel()

-		assert.Nil(t, conn)
-		require.Error(t, err)
-		assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
+	// test with quic
+	t.Run("QUIC", func(t *testing.T) {
+		test(t, ctx, planet, quicDialer)
+	})
 	})
 }
@@ -445,7 +445,7 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
 				SuspensionGracePeriod: time.Hour,
 				SuspensionDQEnabled:   true,
 			},
-			NodeSelectionCache: overlay.CacheConfig{
+			NodeSelectionCache: overlay.UploadSelectionCacheConfig{
 				Staleness: 3 * time.Minute,
 			},
 			UpdateStatsBatchSize: 100,
@@ -184,9 +184,12 @@ func (service *Service) Tally(ctx context.Context) (err error) {

 	// calculate byte hours, not just bytes
 	hours := time.Since(lastTime).Hours()
-	for id := range observer.Node {
-		observer.Node[id] *= hours
+	var totalSum float64
+	for id, pieceSize := range observer.Node {
+		totalSum += pieceSize
+		observer.Node[id] = pieceSize * hours
 	}
+	mon.IntVal("nodetallies.totalsum").Observe(int64(totalSum)) //mon:locked

 	// save the new results
 	var errAtRest, errBucketInfo error
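The hunk above now also records the pre-scaling total as a locked monkit metric while converting per-node byte counts into byte-hours. A minimal standalone sketch of that conversion, using a plain map in place of observer.Node:

package main

import (
	"fmt"
	"time"
)

func main() {
	lastTime := time.Now().Add(-2 * time.Hour)
	nodeBytes := map[string]float64{"nodeA": 1e9, "nodeB": 5e8}

	hours := time.Since(lastTime).Hours()
	var totalSum float64
	for id, pieceSize := range nodeBytes {
		totalSum += pieceSize             // total raw bytes, observed before scaling
		nodeBytes[id] = pieceSize * hours // bytes held for `hours` -> byte-hours
	}
	fmt.Printf("total bytes: %.0f, nodeA byte-hours: %.0f\n", totalSum, nodeBytes["nodeA"])
}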
@@ -157,7 +157,7 @@ type API struct {
 		Endpoint *nodestats.Endpoint
 	}

-	SnoPayout struct {
+	SNOPayouts struct {
 		Endpoint *snopayouts.Endpoint
 		Service  *snopayouts.Service
 		DB       snopayouts.DB
@@ -658,16 +658,16 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 	}

 	{ // setup SnoPayout endpoint
-		peer.SnoPayout.DB = peer.DB.SnoPayout()
-		peer.SnoPayout.Service = snopayouts.NewService(
+		peer.SNOPayouts.DB = peer.DB.SNOPayouts()
+		peer.SNOPayouts.Service = snopayouts.NewService(
 			peer.Log.Named("payouts:service"),
-			peer.SnoPayout.DB)
-		peer.SnoPayout.Endpoint = snopayouts.NewEndpoint(
+			peer.SNOPayouts.DB)
+		peer.SNOPayouts.Endpoint = snopayouts.NewEndpoint(
 			peer.Log.Named("payouts:endpoint"),
 			peer.DB.StoragenodeAccounting(),
 			peer.Overlay.DB,
-			peer.SnoPayout.Service)
-		if err := pb.DRPCRegisterHeldAmount(peer.Server.DRPC(), peer.SnoPayout.Endpoint); err != nil {
+			peer.SNOPayouts.Service)
+		if err := pb.DRPCRegisterHeldAmount(peer.Server.DRPC(), peer.SNOPayouts.Endpoint); err != nil {
 			return nil, errs.Combine(err, peer.Close())
 		}
 	}
@@ -21,9 +21,6 @@ type DB interface {
 	// QueryWithheldAmounts queries the WithheldAmounts for the given nodeID.
 	QueryWithheldAmounts(ctx context.Context, nodeID storj.NodeID) (WithheldAmounts, error)

-	// QueryPaidInYear returns the total amount paid to the nodeID in the provided year.
-	QueryPaidInYear(ctx context.Context, nodeID storj.NodeID, year int) (currency.MicroUnit, error)

 	// RecordPeriod records a set of paystubs and payments for some time period.
 	RecordPeriod(ctx context.Context, paystubs []Paystub, payments []Payment) error

@@ -40,7 +40,7 @@ type Invoice struct {
 	Disposed      currency.MicroUnit `csv:"disposed"`       // Amount of owed that is due to graceful-exit or held period ending
 	TotalHeld     currency.MicroUnit `csv:"total-held"`     // Total amount ever held from the node
 	TotalDisposed currency.MicroUnit `csv:"total-disposed"` // Total amount ever disposed to the node
-	PaidYTD       currency.MicroUnit `csv:"paid-ytd"`       // Total amount paid so far this year (not including this period)
+	PaidYTD       currency.MicroUnit `csv:"paid-ytd"`       // Deprecated
 }

 // MergeNodeInfo updates the fields representing the node information into the invoice.
@@ -19,6 +19,10 @@ const (
 	ProjectInputType = "projectInput"
 	// ProjectUsageType is a graphql type name for project usage.
 	ProjectUsageType = "projectUsage"
+	// ProjectsCursorInputType is a graphql input type name for projects cursor.
+	ProjectsCursorInputType = "projectsCursor"
+	// ProjectsPageType is a graphql type name for projects page.
+	ProjectsPageType = "projectsPage"
 	// BucketUsageCursorInputType is a graphql input
 	// type name for bucket usage cursor.
 	BucketUsageCursorInputType = "bucketUsageCursor"
@@ -62,6 +66,10 @@ const (
 	FieldCurrentPage = "currentPage"
 	// FieldTotalCount is a field name for bucket usage count total.
 	FieldTotalCount = "totalCount"
+	// FieldMemberCount is a field name for number of project members.
+	FieldMemberCount = "memberCount"
+	// FieldProjects is a field name for projects.
+	FieldProjects = "projects"
 	// FieldProjectMembers is a field name for project members.
 	FieldProjectMembers = "projectMembers"
 	// CursorArg is an argument name for cursor.
@@ -104,6 +112,9 @@ func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Objec
 			FieldCreatedAt: &graphql.Field{
 				Type: graphql.DateTime,
 			},
+			FieldMemberCount: &graphql.Field{
+				Type: graphql.Int,
+			},
 			FieldMembers: &graphql.Field{
 				Type: types.projectMemberPage,
 				Args: graphql.FieldConfigArgument{
@@ -255,6 +266,21 @@ func graphqlProjectInput() *graphql.InputObject {
 	})
 }
+// graphqlProjectsCursor creates projects cursor graphql input type.
+func graphqlProjectsCursor() *graphql.InputObject {
+	return graphql.NewInputObject(graphql.InputObjectConfig{
+		Name: ProjectsCursorInputType,
+		Fields: graphql.InputObjectConfigFieldMap{
+			LimitArg: &graphql.InputObjectFieldConfig{
+				Type: graphql.NewNonNull(graphql.Int),
+			},
+			PageArg: &graphql.InputObjectFieldConfig{
+				Type: graphql.NewNonNull(graphql.Int),
+			},
+		},
+	})
+}

 // graphqlBucketUsageCursor creates bucket usage cursor graphql input type.
 func graphqlBucketUsageCursor() *graphql.InputObject {
 	return graphql.NewInputObject(graphql.InputObjectConfig{
@@ -300,6 +326,33 @@ func graphqlBucketUsage() *graphql.Object {
 	})
 }

+// graphqlProjectsPage creates a projects page graphql object.
+func graphqlProjectsPage(types *TypeCreator) *graphql.Object {
+	return graphql.NewObject(graphql.ObjectConfig{
+		Name: ProjectsPageType,
+		Fields: graphql.Fields{
+			FieldProjects: &graphql.Field{
+				Type: graphql.NewList(types.project),
+			},
+			LimitArg: &graphql.Field{
+				Type: graphql.Int,
+			},
+			OffsetArg: &graphql.Field{
+				Type: graphql.Int,
+			},
+			FieldPageCount: &graphql.Field{
+				Type: graphql.Int,
+			},
+			FieldCurrentPage: &graphql.Field{
+				Type: graphql.Int,
+			},
+			FieldTotalCount: &graphql.Field{
+				Type: graphql.Int,
+			},
+		},
+	})
+}
 // graphqlBucketUsagePage creates bucket usage page graphql object.
 func graphqlBucketUsagePage(types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
@@ -362,7 +415,14 @@ func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInf
 	return
 }

-// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args.
+// fromMapProjectsCursor creates console.ProjectsCursor from input args.
+func fromMapProjectsCursor(args map[string]interface{}) (cursor console.ProjectsCursor) {
+	cursor.Limit = args[LimitArg].(int)
+	cursor.Page = args[PageArg].(int)
+	return
+}

+// fromMapBucketUsageCursor creates accounting.BucketUsageCursor from input args.
 func fromMapBucketUsageCursor(args map[string]interface{}) (cursor accounting.BucketUsageCursor) {
 	limit, _ := args[LimitArg].(int)
 	page, _ := args[PageArg].(int)
@@ -17,6 +17,8 @@ const (
 	Query = "query"
 	// ProjectQuery is a query name for project.
 	ProjectQuery = "project"
+	// OwnedProjectsQuery is a query name for projects owned by an account.
+	OwnedProjectsQuery = "ownedProjects"
 	// MyProjectsQuery is a query name for projects related to account.
 	MyProjectsQuery = "myProjects"
 	// ActiveRewardQuery is a query name for current active reward offer.
@@ -53,6 +55,19 @@ func rootQuery(service *console.Service, mailService *mailservice.Service, types
 				return project, nil
 			},
 		},
+		OwnedProjectsQuery: &graphql.Field{
+			Type: types.projectsPage,
+			Args: graphql.FieldConfigArgument{
+				CursorArg: &graphql.ArgumentConfig{
+					Type: graphql.NewNonNull(types.projectsCursor),
+				},
+			},
+			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
+				cursor := fromMapProjectsCursor(p.Args[CursorArg].(map[string]interface{}))
+				page, err := service.GetUsersOwnedProjectsPage(p.Context, cursor)
+				return page, err
+			},
+		},
 		MyProjectsQuery: &graphql.Field{
 			Type: graphql.NewList(types.project),
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -409,6 +409,59 @@ func TestGraphqlQuery(t *testing.T) {
 			}
 		}

 		assert.True(t, foundProj1)
 		assert.True(t, foundProj2)
 	})
+	t.Run("OwnedProjects query", func(t *testing.T) {
+		query := fmt.Sprintf(
+			"query {ownedProjects( cursor: { limit: %d, page: %d } ) {projects{id, name, ownerId, description, createdAt, memberCount}, limit, offset, pageCount, currentPage, totalCount } }",
+			5,
+			1,
+		)

+		result := testQuery(t, query)

+		data := result.(map[string]interface{})
+		projectsPage := data[consoleql.OwnedProjectsQuery].(map[string]interface{})

+		projectsList := projectsPage[consoleql.FieldProjects].([]interface{})
+		assert.Len(t, projectsList, 2)

+		assert.EqualValues(t, 1, projectsPage[consoleql.FieldCurrentPage])
+		assert.EqualValues(t, 0, projectsPage[consoleql.OffsetArg])
+		assert.EqualValues(t, 5, projectsPage[consoleql.LimitArg])
+		assert.EqualValues(t, 1, projectsPage[consoleql.FieldPageCount])
+		assert.EqualValues(t, 2, projectsPage[consoleql.FieldTotalCount])

+		testProject := func(t *testing.T, actual map[string]interface{}, expected *console.Project, expectedNumMembers int) {
+			assert.Equal(t, expected.Name, actual[consoleql.FieldName])
+			assert.Equal(t, expected.Description, actual[consoleql.FieldDescription])

+			createdAt := time.Time{}
+			err := createdAt.UnmarshalText([]byte(actual[consoleql.FieldCreatedAt].(string)))

+			assert.NoError(t, err)
+			assert.True(t, expected.CreatedAt.Equal(createdAt))

+			assert.EqualValues(t, expectedNumMembers, actual[consoleql.FieldMemberCount])
+		}

+		var foundProj1, foundProj2 bool

+		for _, entry := range projectsList {
+			project := entry.(map[string]interface{})

+			id := project[consoleql.FieldID].(string)
+			switch id {
+			case createdProject.ID.String():
+				foundProj1 = true
+				testProject(t, project, createdProject, 3)
+			case project2.ID.String():
+				foundProj2 = true
+				testProject(t, project, project2, 1)
+			}
+		}

+		assert.True(t, foundProj1)
+		assert.True(t, foundProj2)
+	})
@@ -21,6 +21,7 @@ type TypeCreator struct {
 	creditUsage     *graphql.Object
 	project         *graphql.Object
 	projectUsage    *graphql.Object
+	projectsPage    *graphql.Object
 	bucketUsage     *graphql.Object
 	bucketUsagePage *graphql.Object
 	projectMember   *graphql.Object
@@ -31,6 +32,7 @@ type TypeCreator struct {

 	userInput            *graphql.InputObject
 	projectInput         *graphql.InputObject
+	projectsCursor       *graphql.InputObject
 	bucketUsageCursor    *graphql.InputObject
 	projectMembersCursor *graphql.InputObject
 	apiKeysCursor        *graphql.InputObject
@@ -125,6 +127,16 @@ func (c *TypeCreator) Create(log *zap.Logger, service *console.Service, mailServ
 		return err
 	}

+	c.projectsCursor = graphqlProjectsCursor()
+	if err := c.projectsCursor.Error(); err != nil {
+		return err
+	}

+	c.projectsPage = graphqlProjectsPage(c)
+	if err := c.projectsPage.Error(); err != nil {
+		return err
+	}

 	// root objects
 	c.query = rootQuery(service, mailService, c)
 	if err := c.query.Error(); err != nil {
@@ -34,7 +34,7 @@ type Projects interface {
 	// List returns paginated projects, created before provided timestamp.
 	List(ctx context.Context, offset int64, limit int, before time.Time) (ProjectsPage, error)
 	// ListByOwnerID is a method for querying all projects from the database by ownerID. It also includes the number of members for each project.
-	ListByOwnerID(ctx context.Context, userID uuid.UUID, limit int, offset int64) (ProjectsPage, error)
+	ListByOwnerID(ctx context.Context, userID uuid.UUID, cursor ProjectsCursor) (ProjectsPage, error)

 	// UpdateRateLimit is a method for updating projects rate limit.
 	UpdateRateLimit(ctx context.Context, id uuid.UUID, newLimit int) error
@@ -67,6 +67,13 @@ type ProjectInfo struct {
 	CreatedAt   time.Time `json:"createdAt"`
 }

+// ProjectsCursor holds info for project
+// cursor pagination.
+type ProjectsCursor struct {
+	Limit int
+	Page  int
+}

 // ProjectsPage returns paginated projects,
 // providing next offset if there are more projects
 // to retrieve.
@@ -74,6 +81,13 @@ type ProjectsPage struct {
 	Projects   []Project
 	Next       bool
 	NextOffset int64

+	Limit  int
+	Offset int64

+	PageCount   int
+	CurrentPage int
+	TotalCount  int64
 }

 // ValidateNameAndDescription validates project name and description strings.
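The new ProjectsCursor replaces raw limit/offset arguments with 1-based page addressing. The satellitedb implementation of ListByOwnerID is not part of this diff, so the following helpers are only a hypothetical illustration of how the cursor maps onto the page fields above:

// Hypothetical helpers, assuming the console package types above.
func offsetFromCursor(c ProjectsCursor) int64 {
	return int64(c.Page-1) * int64(c.Limit) // Page is 1-based
}

func fillPage(p *ProjectsPage, c ProjectsCursor, totalCount int64) {
	p.Limit = c.Limit
	p.Offset = offsetFromCursor(c)
	p.CurrentPage = c.Page
	p.TotalCount = totalCount
	p.PageCount = int((totalCount + int64(c.Limit) - 1) / int64(c.Limit)) // ceil
	p.Next = int64(c.Page)*int64(c.Limit) < totalCount
}

With limit 5 and a total of 19 projects this yields PageCount 4, matching the totalPages constant the updated test below expects.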
@@ -228,7 +228,7 @@ func TestProjectsList(t *testing.T) {
 	}

 	require.False(t, projsPage.Next)
-	require.Equal(t, int64(0), projsPage.NextOffset)
+	require.EqualValues(t, 0, projsPage.NextOffset)
 	require.Equal(t, length, len(projectsList))
 	require.Empty(t, cmp.Diff(projects[0], projectsList[0],
 		cmp.Transformer("Sort", func(xs []console.Project) []console.Project {
@@ -243,8 +243,9 @@ func TestProjectsList(t *testing.T) {

 func TestProjectsListByOwner(t *testing.T) {
 	const (
-		limit  = 5
-		length = limit*4 - 1 // make length offset from page size so we can test incomplete page at end
+		limit      = 5
+		length     = limit*4 - 1 // make length offset from page size so we can test incomplete page at end
+		totalPages = 4
 	)

 	rateLimit := 100
@@ -333,23 +334,34 @@ func TestProjectsListByOwner(t *testing.T) {
 		{id: owner2.ID, originalProjects: owner2Projects},
 	}
 	for _, tt := range testCases {
-		projsPage, err := projectsDB.ListByOwnerID(ctx, tt.id, limit, 0)
+		cursor := &console.ProjectsCursor{
+			Limit: limit,
+			Page:  1,
+		}
+		projsPage, err := projectsDB.ListByOwnerID(ctx, tt.id, *cursor)
 		require.NoError(t, err)
 		require.Len(t, projsPage.Projects, limit)
+		require.EqualValues(t, 1, projsPage.CurrentPage)
+		require.EqualValues(t, totalPages, projsPage.PageCount)
+		require.EqualValues(t, length, projsPage.TotalCount)

 		ownerProjectsDB := projsPage.Projects

 		for projsPage.Next {
-			projsPage, err = projectsDB.ListByOwnerID(ctx, tt.id, limit, projsPage.NextOffset)
+			cursor.Page++
+			projsPage, err = projectsDB.ListByOwnerID(ctx, tt.id, *cursor)
 			require.NoError(t, err)
 			// number of projects should not exceed page limit
 			require.True(t, len(projsPage.Projects) > 0 && len(projsPage.Projects) <= limit)
+			require.EqualValues(t, cursor.Page, projsPage.CurrentPage)
+			require.EqualValues(t, totalPages, projsPage.PageCount)
+			require.EqualValues(t, length, projsPage.TotalCount)

 			ownerProjectsDB = append(ownerProjectsDB, projsPage.Projects...)
 		}

 		require.False(t, projsPage.Next)
-		require.Equal(t, int64(0), projsPage.NextOffset)
+		require.EqualValues(t, 0, projsPage.NextOffset)
 		require.Equal(t, length, len(ownerProjectsDB))
 		// sort originalProjects by Name in alphabetical order
 		originalProjects := tt.originalProjects
@@ -988,6 +988,22 @@ func (s *Service) GetUsersProjects(ctx context.Context) (ps []Project, err error
 	return
 }

+// GetUsersOwnedProjectsPage is a method for querying paged projects.
+func (s *Service) GetUsersOwnedProjectsPage(ctx context.Context, cursor ProjectsCursor) (_ ProjectsPage, err error) {
+	defer mon.Task()(&ctx)(&err)
+	auth, err := s.getAuthAndAuditLog(ctx, "get user's owned projects page")
+	if err != nil {
+		return ProjectsPage{}, Error.Wrap(err)
+	}

+	projects, err := s.store.Projects().ListByOwnerID(ctx, auth.User.ID, cursor)
+	if err != nil {
+		return ProjectsPage{}, Error.Wrap(err)
+	}

+	return projects, nil
+}

 // GetCurrentRewardByType is a method for querying current active reward offer based on its type.
 func (s *Service) GetCurrentRewardByType(ctx context.Context, offerType rewards.OfferType) (offer *rewards.Offer, err error) {
 	defer mon.Task()(&ctx)(&err)
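A hedged usage sketch for the new service method: a caller collects every owned project by advancing the cursor while the page reports Next, mirroring the loop in TestProjectsListByOwner earlier in this diff (service, ctx, and error handling context are assumed).

cursor := console.ProjectsCursor{Limit: 5, Page: 1}
page, err := service.GetUsersOwnedProjectsPage(ctx, cursor)
if err != nil {
	return err
}
projects := page.Projects
for page.Next {
	cursor.Page++
	if page, err = service.GetUsersOwnedProjectsPage(ctx, cursor); err != nil {
		return err
	}
	projects = append(projects, page.Projects...)
}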
54 satellite/console/wasm/tests/main.spec.js Normal file
@@ -0,0 +1,54 @@
+// Copyright (C) 2021 Storj Labs, Inc.
+// See LICENSE for copying information

+require('./wasm_exec.js');

+const fs = require('fs');
+const path = require('path');

+describe('main.wasm Tests', () => {
+    beforeAll(async () => {
+        const go = new Go();
+        wasmPath = __dirname;
+        if (process.env.WASM_PATH) {
+            wasmPath = process.env.WASM_PATH;
+        }
+        wasmPath = path.resolve(wasmPath, 'main.wasm');
+        const buffer = fs.readFileSync(wasmPath);
+        await WebAssembly.instantiate(buffer, go.importObject).then(results => {
+            go.run(results.instance);
+        })
+    })

+    describe('generateAccessGrant function', () => {
+        test('returns an error when called without arguments', async () => {
+            const result = generateAccessGrant();
+            expect(result["error"]).toContain("not enough argument")
+            expect(result["value"]).toBeNull()
+        });
+        test('happy path returns an access grant', async () => {
+            const apiKey = "13YqeGFpvtzbUp1QAfpvy2E5ZqLUFFNhEkv7153UDGDVnSmTuYYa7tKUnENGgvFXCCSFP7zNhKw6fsuQmWG5JGdQJbXVaVYFhoM2LcA"
+            const projectID = "b9e663e0-69e0-48e9-8eb2-4079be85e488"
+            const result = generateAccessGrant("a",apiKey, "supersecretpassphrase", projectID);
+            expect(result["error"]).toEqual("")
+            expect(result["value"]).toEqual("158UWUf6FHtCk8RGQn2JAXETNRnVwyoF7yEQQnuvPrLbsCPpttuAVWwzQ2YgD2bpQLpdBnctHssvQsqeju7kn7gz3LEJZSdRqyRG6rA9Da3PLGsawWMtM3NdGVqq9akyEmotsN7eMJVC1mfTsupiYXeHioTTTg11kY")
+        });
+    });

+    describe('setAPIKeyPermission function', () => {
+        test('returns an error when called without arguments', async () => {
+            const result = setAPIKeyPermission();
+            expect(result["error"]).toContain("not enough arguments")
+            expect(result["value"]).toBeNull()
+        });
+        test('default permissions returns an access grant', async () => {
+            const apiKey = "13YqeGFpvtzbUp1QAfpvy2E5ZqLUFFNhEkv7153UDGDVnSmTuYYa7tKUnENGgvFXCCSFP7zNhKw6fsuQmWG5JGdQJbXVaVYFhoM2LcA"
+            const projectID = "b9e663e0-69e0-48e9-8eb2-4079be85e488"
+            const perm = newPermission()
+            perm["AllowDownload"] = true
+            const result = setAPIKeyPermission(apiKey, [], perm);
+            expect(result["error"]).toEqual("")
+            expect(result["value"]).toEqual("19JjrwZJK1Ck5PdhRtxujGUnzbbiPYSSPZGyE8xrTbxVaJSEr9JL4vXpca3bSH2igjfeYsWeoe7rzo4QTGnwd29Pa924rtXzRjDzSxvkt4UdFd6iiCg")
+        });
+    });
+});
12 satellite/console/wasm/tests/package.json Normal file
@@ -0,0 +1,12 @@
+{
+  "name": "testing-wasm",
+  "version": "1.0.0",
+  "scripts": {
+    "build": "GOOS=js GOARCH=wasm go build -o main.wasm storj.io/storj/satellite/console/wasm",
+    "pretest": "npm run build",
+    "test": "jest || true"
+  },
+  "devDependencies": {
+    "jest": "^23.5.0"
+  }
+}
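The jest suite above calls globals such as generateAccessGrant that the compiled main.wasm registers. The real storj.io/storj/satellite/console/wasm entrypoint is not part of this diff, so the following is only a sketch of how a Go wasm module typically exposes such a global via syscall/js; the body and error strings are illustrative assumptions.

//go:build js && wasm

package main

import "syscall/js"

func main() {
	js.Global().Set("generateAccessGrant", js.FuncOf(
		func(this js.Value, args []js.Value) interface{} {
			if len(args) < 4 {
				// illustrative error shape matching what the tests assert on
				return map[string]interface{}{"error": "not enough arguments", "value": nil}
			}
			// ... derive the access grant from the provided arguments ...
			return map[string]interface{}{"error": "", "value": "<access grant>"}
		}))
	select {} // keep the Go runtime alive so exported funcs stay callable
}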
@@ -22,8 +22,6 @@ type State struct {
 	stats Stats
 	// netByID returns subnet based on storj.NodeID
 	netByID map[storj.NodeID]string
-	// ipPortByID returns IP based on storj.NodeID
-	ipPortByID map[storj.NodeID]string
 	// nonDistinct contains selectors for non-distinct selection.
 	nonDistinct struct {
 		Reputable SelectByID
@@ -59,14 +57,11 @@ func NewState(reputableNodes, newNodes []*Node) *State {
 	state := &State{}

 	state.netByID = map[storj.NodeID]string{}
-	state.ipPortByID = map[storj.NodeID]string{}
 	for _, node := range reputableNodes {
 		state.netByID[node.ID] = node.LastNet
-		state.ipPortByID[node.ID] = node.LastIPPort
 	}
 	for _, node := range newNodes {
 		state.netByID[node.ID] = node.LastNet
-		state.ipPortByID[node.ID] = node.LastIPPort
 	}

 	state.nonDistinct.Reputable = SelectByID(reputableNodes)
@@ -140,18 +135,6 @@ func (state *State) Select(ctx context.Context, request Request) (_ []*Node, err
 	return selected, nil
 }

-// IPs returns node ip:port for nodes that are in state.
-func (state *State) IPs(ctx context.Context, nodes []storj.NodeID) map[storj.NodeID]string {
-	defer mon.Task()(&ctx)(nil)
-	xs := make(map[storj.NodeID]string, len(nodes))
-	for _, nodeID := range nodes {
-		if ip, exists := state.ipPortByID[nodeID]; exists {
-			xs[nodeID] = ip
-		}
-	}
-	return xs
-}

 // Stats returns state information.
 func (state *State) Stats() Stats {
 	state.mu.RLock()
@@ -203,27 +203,3 @@ next:

 	return xs
 }

-func TestState_IPs(t *testing.T) {
-	ctx := testcontext.New(t)
-	defer ctx.Cleanup()

-	reputableNodes := createRandomNodes(2, "1.0.1")
-	newNodes := createRandomNodes(2, "1.0.3")

-	state := nodeselection.NewState(reputableNodes, newNodes)

-	nodeIPs := state.IPs(ctx, nil)
-	require.Equal(t, map[storj.NodeID]string{}, nodeIPs)

-	missing := storj.NodeID{}
-	nodeIPs = state.IPs(ctx, []storj.NodeID{
-		reputableNodes[0].ID,
-		newNodes[1].ID,
-		missing,
-	})
-	require.Equal(t, map[storj.NodeID]string{
-		reputableNodes[0].ID: "1.0.1.0:8080",
-		newNodes[1].ID:       "1.0.3.1:8080",
-	}, nodeIPs)
-}
@@ -442,7 +442,7 @@ func BenchmarkNodeSelection(b *testing.B) {

 	service, err := overlay.NewService(zap.NewNop(), overlaydb, overlay.Config{
 		Node: nodeSelectionConfig,
-		NodeSelectionCache: overlay.CacheConfig{
+		NodeSelectionCache: overlay.UploadSelectionCacheConfig{
 			Staleness: time.Hour,
 		},
 	})
@@ -496,9 +496,9 @@ func BenchmarkNodeSelection(b *testing.B) {
 		}
 	})

-	b.Run("NodeSelectionCacheGetNodes", func(b *testing.B) {
+	b.Run("UploadSelectionCacheGetNodes", func(b *testing.B) {
 		for i := 0; i < b.N; i++ {
-			selected, err := service.SelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{
+			selected, err := service.UploadSelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{
 				RequestedCount: SelectCount,
 				ExcludedIDs:    nil,
 				MinimumVersion: "v1.0.0",
@@ -508,9 +508,9 @@ func BenchmarkNodeSelection(b *testing.B) {
 		}
 	})

-	b.Run("NodeSelectionCacheGetNodesExclusion", func(b *testing.B) {
+	b.Run("UploadSelectionCacheGetNodesExclusion", func(b *testing.B) {
 		for i := 0; i < b.N; i++ {
-			selected, err := service.SelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{
+			selected, err := service.UploadSelectionCache.GetNodes(ctx, overlay.FindStorageNodesRequest{
 				RequestedCount: SelectCount,
 				ExcludedIDs:    excludedIDs,
 				MinimumVersion: "v1.0.0",
@@ -21,7 +21,7 @@ var (
 // Config is a configuration for overlay service.
 type Config struct {
 	Node                 NodeSelectionConfig
-	NodeSelectionCache   CacheConfig
+	NodeSelectionCache   UploadSelectionCacheConfig
 	UpdateStatsBatchSize int `help:"number of update requests to process per transaction" default:"100"`
 	AuditHistory         AuditHistoryConfig
 }
148 satellite/overlay/downloadselection.go Normal file
@@ -0,0 +1,148 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.

+package overlay

+import (
+	"context"
+	"sync"
+	"time"

+	"go.uber.org/zap"

+	"storj.io/common/storj"
+)

+// DownloadSelectionDB implements the database for download selection cache.
+//
+// architecture: Database
+type DownloadSelectionDB interface {
+	// SelectAllStorageNodesDownload returns nodes that are ready for downloading
+	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*SelectedNode, error)
+}

+// DownloadSelectionCacheConfig contains configuration for the selection cache.
+type DownloadSelectionCacheConfig struct {
+	Staleness      time.Duration
+	OnlineWindow   time.Duration
+	AsOfSystemTime AsOfSystemTimeConfig
+}

+// DownloadSelectionCache keeps a list of all the storage nodes that are qualified to download data from.
+// The cache will sync with the nodes table in the database and get refreshed once the staleness time has passed.
+type DownloadSelectionCache struct {
+	log    *zap.Logger
+	db     DownloadSelectionDB
+	config DownloadSelectionCacheConfig

+	mu          sync.RWMutex
+	lastRefresh time.Time
+	state       *DownloadSelectionCacheState
+}

+// NewDownloadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to download data from.
+func NewDownloadSelectionCache(log *zap.Logger, db DownloadSelectionDB, config DownloadSelectionCacheConfig) *DownloadSelectionCache {
+	return &DownloadSelectionCache{
+		log:    log,
+		db:     db,
+		config: config,
+	}
+}

+// Refresh populates the cache with all of the reputableNodes and newNode nodes
+// This method is useful for tests.
+func (cache *DownloadSelectionCache) Refresh(ctx context.Context) (err error) {
+	defer mon.Task()(&ctx)(&err)
+	_, err = cache.refresh(ctx)
+	return err
+}

+// refresh calls out to the database and refreshes the cache with the most up-to-date
+// data from the nodes table, then sets time that the last refresh occurred so we know when
+// to refresh again in the future.
+func (cache *DownloadSelectionCache) refresh(ctx context.Context) (state *DownloadSelectionCacheState, err error) {
+	defer mon.Task()(&ctx)(&err)
+	cache.mu.Lock()
+	defer cache.mu.Unlock()

+	if cache.state != nil && time.Since(cache.lastRefresh) <= cache.config.Staleness {
+		return cache.state, nil
+	}

+	onlineNodes, err := cache.db.SelectAllStorageNodesDownload(ctx, cache.config.OnlineWindow, cache.config.AsOfSystemTime)
+	if err != nil {
+		return cache.state, err
+	}

+	cache.lastRefresh = time.Now().UTC()
+	cache.state = NewDownloadSelectionCacheState(onlineNodes)

+	mon.IntVal("refresh_cache_size_online").Observe(int64(len(onlineNodes)))
+	return cache.state, nil
+}

+// GetNodeIPs gets the last node ip:port from the cache, refreshing when needed.
+func (cache *DownloadSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) {
+	defer mon.Task()(&ctx)(&err)

+	cache.mu.RLock()
+	lastRefresh := cache.lastRefresh
+	state := cache.state
+	cache.mu.RUnlock()

+	// if the cache is stale, then refresh it before we get nodes
+	if state == nil || time.Since(lastRefresh) > cache.config.Staleness {
+		state, err = cache.refresh(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}

+	return state.IPs(nodes), nil
+}

+// Size returns how many nodes are in the cache.
+func (cache *DownloadSelectionCache) Size() int {
+	cache.mu.RLock()
+	state := cache.state
+	cache.mu.RUnlock()

+	if state == nil {
+		return 0
+	}

+	return state.Size()
+}

+// DownloadSelectionCacheState contains state of download selection cache.
+type DownloadSelectionCacheState struct {
+	// ipPortByID returns IP based on storj.NodeID
+	ipPortByID map[storj.NodeID]string
+}

+// NewDownloadSelectionCacheState creates a new state from the nodes.
+func NewDownloadSelectionCacheState(nodes []*SelectedNode) *DownloadSelectionCacheState {
+	ipPortByID := map[storj.NodeID]string{}
+	for _, n := range nodes {
+		ipPortByID[n.ID] = n.LastIPPort
+	}
+	return &DownloadSelectionCacheState{
+		ipPortByID: ipPortByID,
+	}
+}

+// Size returns how many nodes are in the state.
+func (state *DownloadSelectionCacheState) Size() int {
+	return len(state.ipPortByID)
+}

+// IPs returns node ip:port for nodes that are in state.
+func (state *DownloadSelectionCacheState) IPs(nodes []storj.NodeID) map[storj.NodeID]string {
+	xs := make(map[storj.NodeID]string, len(nodes))
+	for _, nodeID := range nodes {
+		if ip, exists := state.ipPortByID[nodeID]; exists {
+			xs[nodeID] = ip
+		}
+	}
+	return xs
+}
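A hedged usage sketch of the new cache, with the db, log, ctx, and nodeIDs wiring assumed; the test file below exercises the same flow against a real database:

cache := overlay.NewDownloadSelectionCache(log, db.OverlayCache(),
	overlay.DownloadSelectionCacheConfig{
		Staleness:    3 * time.Minute,
		OnlineWindow: time.Hour,
	})

// GetNodeIPs refreshes lazily, so no explicit Refresh call is required.
ips, err := cache.GetNodeIPs(ctx, nodeIDs)
if err != nil {
	return err
}
for id, addr := range ips {
	fmt.Println(id, "last seen at", addr)
}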
90 satellite/overlay/downloadselection_test.go Normal file
@@ -0,0 +1,90 @@
+// Copyright (C) 2021 Storj Labs, Inc.
+// See LICENSE for copying information.

+package overlay_test

+import (
+	"testing"
+	"time"

+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"

+	"storj.io/common/pb"
+	"storj.io/common/storj"
+	"storj.io/common/testcontext"
+	"storj.io/common/testrand"
+	"storj.io/storj/satellite"
+	"storj.io/storj/satellite/overlay"
+	"storj.io/storj/satellite/satellitedb/satellitedbtest"
+)

+var downloadSelectionCacheConfig = overlay.DownloadSelectionCacheConfig{
+	Staleness:      lowStaleness,
+	OnlineWindow:   time.Hour,
+	AsOfSystemTime: overlay.AsOfSystemTimeConfig{Enabled: true, DefaultInterval: time.Minute},
+}

+func TestDownloadSelectionCacheState_Refresh(t *testing.T) {
+	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
+		cache := overlay.NewDownloadSelectionCache(zap.NewNop(),
+			db.OverlayCache(),
+			downloadSelectionCacheConfig,
+		)

+		// the cache should have no nodes to start
+		err := cache.Refresh(ctx)
+		require.NoError(t, err)
+		require.Equal(t, 0, cache.Size())

+		// add some nodes to the database
+		const nodeCount = 2
+		addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount, 0)

+		// confirm nodes are in the cache once
+		err = cache.Refresh(ctx)
+		require.NoError(t, err)
+		require.Equal(t, nodeCount, cache.Size())
+	})
+}

+func TestDownloadSelectionCacheState_GetNodeIPs(t *testing.T) {
+	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
+		cache := overlay.NewDownloadSelectionCache(zap.NewNop(),
+			db.OverlayCache(),
+			downloadSelectionCacheConfig,
+		)

+		// add some nodes to the database
+		const nodeCount = 2
+		ids := addNodesToNodesTable(ctx, t, db.OverlayCache(), nodeCount, 0)

+		// confirm nodes are in the cache once
+		nodeips, err := cache.GetNodeIPs(ctx, ids)
+		require.NoError(t, err)
+		for _, id := range ids {
+			require.NotEmpty(t, nodeips[id])
+		}
+	})
+}

+func TestDownloadSelectionCacheState_IPs(t *testing.T) {
+	ctx := testcontext.New(t)
+	defer ctx.Cleanup()

+	node := &overlay.SelectedNode{
+		ID: testrand.NodeID(),
+		Address: &pb.NodeAddress{
+			Address: "1.0.1.1:8080",
+		},
+		LastNet:    "1.0.1",
+		LastIPPort: "1.0.1.1:8080",
+	}

+	state := overlay.NewDownloadSelectionCacheState([]*overlay.SelectedNode{node})
+	require.Equal(t, state.Size(), 1)

+	ips := state.IPs([]storj.NodeID{testrand.NodeID(), node.ID})
+	require.Len(t, ips, 1)
+	require.Equal(t, node.LastIPPort, ips[node.ID])
+}
@@ -78,7 +78,7 @@ func TestMinimumDiskSpace(t *testing.T) {
 		n1, err := saOverlay.Service.FindStorageNodesForUpload(ctx, req)
 		require.Error(t, err)
 		require.True(t, overlay.ErrNotEnoughNodes.Has(err))
-		n2, err := saOverlay.Service.SelectionCache.GetNodes(ctx, req)
+		n2, err := saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req)
 		require.Error(t, err)
 		require.True(t, overlay.ErrNotEnoughNodes.Has(err))
 		require.Equal(t, len(n2), len(n1))
@@ -104,7 +104,7 @@ func TestMinimumDiskSpace(t *testing.T) {
 		n2, err = saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig)
 		require.NoError(t, err)
 		require.Equal(t, len(n1), len(n2))
-		n3, err = saOverlay.Service.SelectionCache.GetNodes(ctx, req)
+		n3, err = saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req)
 		require.NoError(t, err)
 		require.Equal(t, len(n1), len(n3))
 	})
@@ -218,7 +218,7 @@ func TestEnsureMinimumRequested(t *testing.T) {
 		require.Len(t, nodes, requestedCount)
 		require.Equal(t, 0, countReputable(nodes))

-		n2, err := service.SelectionCache.GetNodes(ctx, req)
+		n2, err := service.UploadSelectionCache.GetNodes(ctx, req)
 		require.NoError(t, err)
 		require.Equal(t, requestedCount, len(n2))
 	})
@@ -521,7 +521,7 @@ func TestFindStorageNodesDistinctNetworks(t *testing.T) {
 		require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort)
 		require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr)
 		require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr)
-		n2, err := satellite.Overlay.Service.SelectionCache.GetNodes(ctx, req)
+		n2, err := satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req)
 		require.NoError(t, err)
 		require.Len(t, n2, 2)
 		require.NotEqual(t, n2[0].LastIPPort, n2[1].LastIPPort)
@@ -543,7 +543,7 @@ func TestFindStorageNodesDistinctNetworks(t *testing.T) {
 		n1, err := satellite.Overlay.Service.FindStorageNodesWithPreferences(ctx, req, &satellite.Config.Overlay.Node)
 		require.Error(t, err)
 		require.Equal(t, len(n), len(n1))
-		n2, err = satellite.Overlay.Service.SelectionCache.GetNodes(ctx, req)
+		n2, err = satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req)
 		require.Error(t, err)
 		require.Equal(t, len(n1), len(n2))
 	})
@@ -593,7 +593,7 @@ func TestSelectNewStorageNodesExcludedIPs(t *testing.T) {
 		require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort)
 		require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr)
 		require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr)
-		n2, err := satellite.Overlay.Service.SelectionCache.GetNodes(ctx, req)
+		n2, err := satellite.Overlay.Service.UploadSelectionCache.GetNodes(ctx, req)
 		require.NoError(t, err)
 		require.Len(t, n2, 2)
 		require.NotEqual(t, n2[0].LastIPPort, n2[1].LastIPPort)
@@ -749,7 +749,7 @@ func TestCacheSelectionVsDBSelection(t *testing.T) {
 		req := overlay.FindStorageNodesRequest{RequestedCount: 5}
 		n1, err := saOverlay.Service.FindStorageNodesForUpload(ctx, req)
 		require.NoError(t, err)
-		n2, err := saOverlay.Service.SelectionCache.GetNodes(ctx, req)
+		n2, err := saOverlay.Service.UploadSelectionCache.GetNodes(ctx, req)
 		require.NoError(t, err)
 		require.Equal(t, len(n2), len(n1))
 		n3, err := saOverlay.Service.FindStorageNodesWithPreferences(ctx, req, &nodeConfig)
@@ -46,6 +46,8 @@ type DB interface {
 	SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*SelectedNode, error)
 	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
 	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error)
+	// SelectAllStorageNodesDownload returns nodes that are ready for downloading
+	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*SelectedNode, error)

 	// Get looks up the node by nodeID
 	Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
@@ -263,10 +265,12 @@ func (node *SelectedNode) Clone() *SelectedNode {
 //
 // architecture: Service
 type Service struct {
-	log            *zap.Logger
-	db             DB
-	config         Config
-	SelectionCache *NodeSelectionCache
+	log    *zap.Logger
+	db     DB
+	config Config

+	UploadSelectionCache   *UploadSelectionCache
+	DownloadSelectionCache *DownloadSelectionCache
 }

 // NewService returns a new Service.
@@ -279,9 +283,16 @@ func NewService(log *zap.Logger, db DB, config Config) (*Service, error) {
 		log:    log,
 		db:     db,
 		config: config,
-		SelectionCache: NewNodeSelectionCache(log, db,

+		UploadSelectionCache: NewUploadSelectionCache(log, db,
 			config.NodeSelectionCache.Staleness, config.Node,
 		),

+		DownloadSelectionCache: NewDownloadSelectionCache(log, db, DownloadSelectionCacheConfig{
+			Staleness:      config.NodeSelectionCache.Staleness,
+			OnlineWindow:   config.Node.OnlineWindow,
+			AsOfSystemTime: config.Node.AsOfSystemTime,
+		}),
 	}, nil
 }

 // NewService returns a new Service.
@@ -314,7 +325,7 @@ func (service *Service) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs
 // GetNodeIPs returns a map of node ip:port for the supplied nodeIDs.
 func (service *Service) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]string, err error) {
 	defer mon.Task()(&ctx)(&err)
-	return service.SelectionCache.GetNodeIPs(ctx, nodeIDs)
+	return service.DownloadSelectionCache.GetNodeIPs(ctx, nodeIDs)
 }

 // IsOnline checks if a node is 'online' based on the collected statistics.
@@ -347,7 +358,7 @@ func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindS
 		return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node)
 	}

-	selectedNodes, err := service.SelectionCache.GetNodes(ctx, req)
+	selectedNodes, err := service.UploadSelectionCache.GetNodes(ctx, req)
 	if err != nil {
 		service.log.Warn("error selecting from node selection cache", zap.String("err", err.Error()))
 	}
@@ -545,7 +556,7 @@ func (service *Service) TestVetNode(ctx context.Context, nodeID storj.NodeID) (v
 		service.log.Warn("error vetting node", zap.Stringer("node ID", nodeID))
 		return nil, err
 	}
-	err = service.SelectionCache.Refresh(ctx)
+	err = service.UploadSelectionCache.Refresh(ctx)
 	service.log.Warn("nodecache refresh err", zap.Error(err))
 	return vettedTime, err
 }
@@ -557,7 +568,7 @@ func (service *Service) TestUnvetNode(ctx context.Context, nodeID storj.NodeID)
 		service.log.Warn("error unvetting node", zap.Stringer("node ID", nodeID), zap.Error(err))
 		return err
 	}
-	err = service.SelectionCache.Refresh(ctx)
+	err = service.UploadSelectionCache.Refresh(ctx)
 	service.log.Warn("nodecache refresh err", zap.Error(err))
 	return err
 }
@@ -274,7 +274,7 @@ func TestRandomizedSelectionCache(t *testing.T) {
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		satellite := planet.Satellites[0]
 		overlaydb := satellite.Overlay.DB
-		nodeSelectionCache := satellite.Overlay.Service.SelectionCache
+		uploadSelectionCache := satellite.Overlay.Service.UploadSelectionCache
 		allIDs := make(storj.NodeIDList, totalNodes)
 		nodeCounts := make(map[storj.NodeID]int)
 		expectedNewCount := int(float64(totalNodes) * satellite.Config.Overlay.Node.NewNodeFraction)
@@ -324,9 +324,9 @@ func TestRandomizedSelectionCache(t *testing.T) {
 			nodeCounts[newID] = 0
 		}

-		err := nodeSelectionCache.Refresh(ctx)
+		err := uploadSelectionCache.Refresh(ctx)
 		require.NoError(t, err)
-		reputable, new := nodeSelectionCache.Size()
+		reputable, new := uploadSelectionCache.Size()
 		require.Equal(t, totalNodes-expectedNewCount, reputable)
 		require.Equal(t, expectedNewCount, new)

@@ -338,7 +338,7 @@ func TestRandomizedSelectionCache(t *testing.T) {
 			RequestedCount: numNodesToSelect,
 		}

-		nodes, err = nodeSelectionCache.GetNodes(ctx, req)
+		nodes, err = uploadSelectionCache.GetNodes(ctx, req)
 		require.NoError(t, err)
 		require.Len(t, nodes, numNodesToSelect)
@@ -20,7 +20,7 @@ var mon = monkit.Package()
 type Config struct {
 	EnableDQ                  bool          `help:"whether nodes will be disqualified if they have not been contacted in some time" releaseDefault:"false" devDefault:"true"`
 	Interval                  time.Duration `help:"how often to check for and DQ stray nodes" releaseDefault:"168h" devDefault:"5m"`
-	MaxDurationWithoutContact time.Duration `help:"length of time a node can go without contacting satellite before being disqualified" releaseDefault:"720h" devDefault:"5m"`
+	MaxDurationWithoutContact time.Duration `help:"length of time a node can go without contacting satellite before being disqualified" releaseDefault:"720h" devDefault:"7200h"`
 }

 // Chore disqualifies stray nodes.
@@ -15,26 +15,26 @@ import (
 	"storj.io/storj/satellite/nodeselection"
 )

-// CacheDB implements the database for overlay node selection cache.
+// UploadSelectionDB implements the database for upload selection cache.
 //
 // architecture: Database
-type CacheDB interface {
+type UploadSelectionDB interface {
 	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
 	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error)
 }

-// CacheConfig is a configuration for overlay node selection cache.
-type CacheConfig struct {
+// UploadSelectionCacheConfig is a configuration for upload selection cache.
+type UploadSelectionCacheConfig struct {
 	Disabled  bool          `help:"disable node cache" default:"false"`
 	Staleness time.Duration `help:"how stale the node selection cache can be" releaseDefault:"3m" devDefault:"5m"`
 }

-// NodeSelectionCache keeps a list of all the storage nodes that are qualified to store data
+// UploadSelectionCache keeps a list of all the storage nodes that are qualified to store data
 // We organize the nodes by if they are reputable or a new node on the network.
 // The cache will sync with the nodes table in the database and get refreshed once the staleness time has passed.
-type NodeSelectionCache struct {
+type UploadSelectionCache struct {
 	log             *zap.Logger
-	db              CacheDB
+	db              UploadSelectionDB
 	selectionConfig NodeSelectionConfig
 	staleness       time.Duration

@@ -43,9 +43,9 @@ type NodeSelectionCache struct {
 	state *nodeselection.State
 }

-// NewNodeSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data.
-func NewNodeSelectionCache(log *zap.Logger, db CacheDB, staleness time.Duration, config NodeSelectionConfig) *NodeSelectionCache {
-	return &NodeSelectionCache{
+// NewUploadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data.
+func NewUploadSelectionCache(log *zap.Logger, db UploadSelectionDB, staleness time.Duration, config NodeSelectionConfig) *UploadSelectionCache {
+	return &UploadSelectionCache{
 		log:             log,
 		db:              db,
 		staleness:       staleness,
@@ -55,7 +55,7 @@ func NewNodeSelectionCache(log *zap.Logger, db CacheDB, staleness time.Duration,

 // Refresh populates the cache with all of the reputableNodes and newNode nodes
 // This method is useful for tests.
-func (cache *NodeSelectionCache) Refresh(ctx context.Context) (err error) {
+func (cache *UploadSelectionCache) Refresh(ctx context.Context) (err error) {
 	defer mon.Task()(&ctx)(&err)
 	_, err = cache.refresh(ctx)
 	return err
@@ -64,7 +64,7 @@ func (cache *NodeSelectionCache) Refresh(ctx context.Context) (err error) {
 // refresh calls out to the database and refreshes the cache with the most up-to-date
 // data from the nodes table, then sets time that the last refresh occurred so we know when
 // to refresh again in the future.
-func (cache *NodeSelectionCache) refresh(ctx context.Context) (state *nodeselection.State, err error) {
+func (cache *UploadSelectionCache) refresh(ctx context.Context) (state *nodeselection.State, err error) {
 	defer mon.Task()(&ctx)(&err)
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
@@ -89,7 +89,7 @@ func (cache *NodeSelectionCache) refresh(ctx context.Context) (state *nodeselect
 // GetNodes selects nodes from the cache that will be used to upload a file.
 // Every node selected will be from a distinct network.
 // If the cache hasn't been refreshed recently it will do so first.
-func (cache *NodeSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
+func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	cache.mu.RLock()
@@ -118,28 +118,8 @@ func (cache *NodeSelectionCache) GetNodes(ctx context.Context, req FindStorageNo
 	return convNodesToSelectedNodes(selected), err
 }

-// GetNodeIPs gets the last node ip:port from the cache, refreshing when needed.
-func (cache *NodeSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) {
-	defer mon.Task()(&ctx)(&err)

-	cache.mu.RLock()
-	lastRefresh := cache.lastRefresh
-	state := cache.state
-	cache.mu.RUnlock()

-	// if the cache is stale, then refresh it before we get nodes
-	if state == nil || time.Since(lastRefresh) > cache.staleness {
-		state, err = cache.refresh(ctx)
-		if err != nil {
-			return nil, err
-		}
-	}

-	return state.IPs(ctx, nodes), nil
-}

 // Size returns how many reputable nodes and new nodes are in the cache.
-func (cache *NodeSelectionCache) Size() (reputableNodeCount int, newNodeCount int) {
+func (cache *UploadSelectionCache) Size() (reputableNodeCount int, newNodeCount int) {
 	cache.mu.RLock()
 	state := cache.state
 	cache.mu.RUnlock()
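Both the upload and download selection caches share one locking pattern: readers take an optimistic snapshot under the read lock, and refresh re-checks staleness after acquiring the write lock, so concurrent stale readers trigger only a single database query. A distilled, self-contained sketch of that pattern (types and the load callback are stand-ins, not part of the diff):

package overlaysketch

import (
	"context"
	"sync"
	"time"
)

// stateLike stands in for the cached snapshot (nodeselection.State or
// DownloadSelectionCacheState in the hunks above).
type stateLike struct{}

type cacheLike struct {
	mu          sync.RWMutex
	staleness   time.Duration
	lastRefresh time.Time
	state       *stateLike
	load        func(context.Context) (*stateLike, error) // hypothetical DB call
}

// get takes an optimistic snapshot under the read lock and only falls
// back to refresh when the snapshot is missing or stale.
func (c *cacheLike) get(ctx context.Context) (*stateLike, error) {
	c.mu.RLock()
	state, last := c.state, c.lastRefresh
	c.mu.RUnlock()

	if state != nil && time.Since(last) <= c.staleness {
		return state, nil // fast path: no write lock, no DB query
	}
	return c.refresh(ctx)
}

// refresh re-checks staleness under the write lock, so several stale
// readers arriving together cause a single database query.
func (c *cacheLike) refresh(ctx context.Context) (*stateLike, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.state != nil && time.Since(c.lastRefresh) <= c.staleness {
		return c.state, nil // another goroutine refreshed while we waited
	}

	state, err := c.load(ctx)
	if err != nil {
		return c.state, err
	}
	c.state, c.lastRefresh = state, time.Now().UTC()
	return c.state, nil
}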
@ -48,7 +48,7 @@ const (
|
||||
|
||||
func TestRefresh(t *testing.T) {
|
||||
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
|
||||
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
|
||||
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
|
||||
db.OverlayCache(),
|
||||
lowStaleness,
|
||||
nodeSelectionConfig,
|
||||
@ -73,7 +73,7 @@ func TestRefresh(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, count, makeReputable int) (reputableIds []storj.NodeID) {
|
||||
func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, count, makeReputable int) (ids []storj.NodeID) {
|
||||
for i := 0; i < count; i++ {
|
||||
subnet := strconv.Itoa(i) + ".1.2"
addr := subnet + ".3:8080"
@ -109,10 +109,10 @@ func addNodesToNodesTable(ctx context.Context, t *testing.T, db overlay.DB, coun
}, time.Now())
require.NoError(t, err)
require.NotNil(t, stats.VettedAt)
reputableIds = append(reputableIds, storj.NodeID{byte(i)})
ids = append(ids, storj.NodeID{byte(i)})
}
}
return reputableIds
return ids
}

type mockdb struct {
@ -147,7 +147,7 @@ func TestRefreshConcurrent(t *testing.T) {
// concurrent cache.Refresh with high staleness, where high staleness means the
// cache should only be refreshed the first time we call cache.Refresh
mockDB := mockdb{}
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
highStaleness,
nodeSelectionConfig,
@ -168,7 +168,7 @@ func TestRefreshConcurrent(t *testing.T) {
// concurrent cache.Refresh with low staleness, where low staleness
// means that the cache will refresh *every time* cache.Refresh is called
mockDB = mockdb{}
cache = overlay.NewNodeSelectionCache(zap.NewNop(),
cache = overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
lowStaleness,
nodeSelectionConfig,
@ -194,7 +194,7 @@ func TestGetNodes(t *testing.T) {
DistinctIP: true,
MinimumDiskSpace: 100 * memory.MiB,
}
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
db.OverlayCache(),
lowStaleness,
nodeSelectionConfig,
@ -249,7 +249,7 @@ func TestGetNodesConcurrent(t *testing.T) {
reputable: reputableNodes,
new: newNodes,
}
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
highStaleness,
nodeSelectionConfig,
@ -289,7 +289,7 @@ func TestGetNodesConcurrent(t *testing.T) {
reputable: reputableNodes,
new: newNodes,
}
cache = overlay.NewNodeSelectionCache(zap.NewNop(),
cache = overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
lowStaleness,
nodeSelectionConfig,
@ -376,7 +376,7 @@ func TestGetNodesDistinct(t *testing.T) {
config := nodeSelectionConfig
config.NewNodeFraction = 0.5
config.DistinctIP = true
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
highStaleness,
config,
@ -404,7 +404,7 @@ func TestGetNodesDistinct(t *testing.T) {
config := nodeSelectionConfig
config.NewNodeFraction = 0.5
config.DistinctIP = false
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
highStaleness,
config,
@ -422,7 +422,7 @@ func TestGetNodesError(t *testing.T) {
defer ctx.Cleanup()

mockDB := mockdb{}
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
&mockDB,
highStaleness,
nodeSelectionConfig,
@ -450,7 +450,7 @@ func TestNewNodeFraction(t *testing.T) {
DistinctIP: true,
MinimumDiskSpace: 10 * memory.MiB,
}
cache := overlay.NewNodeSelectionCache(zap.NewNop(),
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
db.OverlayCache(),
lowStaleness,
nodeSelectionConfig,
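Every hunk in this test file is the same mechanical change: overlay.NewNodeSelectionCache becomes overlay.NewUploadSelectionCache with an unchanged argument list. A minimal sketch of an updated call site, assuming only the signature visible in the hunks above (logger, overlay DB, staleness window, selection config):

// Sketch only; lowStaleness and nodeSelectionConfig are the test fixtures above.
cache := overlay.NewUploadSelectionCache(zap.NewNop(),
	db.OverlayCache(),   // node source
	lowStaleness,        // refresh the cached list on every call
	nodeSelectionConfig, // vetted/new-node selection thresholds
)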
@ -98,7 +98,7 @@ type DB interface {
// StripeCoinPayments returns stripecoinpayments database.
StripeCoinPayments() stripecoinpayments.DB
// SnoPayout returns database for payouts.
SnoPayout() snopayouts.DB
SNOPayouts() snopayouts.DB
// Compensation tracks storage node compensation
Compensation() compensation.DB
// Revocation tracks revoked macaroons

@ -19,7 +19,7 @@ type Config struct {
IrreparableInterval time.Duration `help:"how frequently irreparable checker should check for lost pieces" releaseDefault:"30m" devDefault:"0h0m5s"`

ReliabilityCacheStaleness time.Duration `help:"how stale reliable node cache can be" releaseDefault:"5m" devDefault:"5m"`
RepairOverrides RepairOverrides `help:"comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override)" releaseDefault:"29/80/110-52,29/80/95-52" devDefault:""`
RepairOverrides RepairOverrides `help:"comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override)" releaseDefault:"29/80/110-52,29/80/95-52,29/80/130-52" devDefault:""`
// Node failure rate is an estimation based on a 6 hour checker run interval (4 checker iterations per day), a network of about 9200 nodes, and about 2 nodes churning per day.
// This results in `2/9200/4 = 0.00005435` being the probability of any single node going down in the interval of one checker iteration.
NodeFailureRate float64 `help:"the probability of a single node going down within the next checker iteration" default:"0.00005435"`
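The default value follows directly from the assumptions in the comment; checking the arithmetic in a line of Go:

// ~2 nodes churning per day / ~9200 nodes / 4 checker iterations per day.
nodeFailureRate := 2.0 / 9200.0 / 4.0 // = 0.0000543478..., rounded to 0.00005435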
@ -83,6 +83,10 @@ func DefaultPartners() PartnerList {
Name: "Heroku",
ID: "706011f3-400e-45eb-a796-90cce2a7d67e",
UUID: parseUUID("706011f3-400e-45eb-a796-90cce2a7d67e"),
}, {
Name: "Hypernet",
ID: "5abfc372-1d59-44fa-bbcc-bc3aa03a9542",
UUID: parseUUID("5abfc372-1d59-44fa-bbcc-bc3aa03a9542"),
}, {
Name: "Infura",
ID: "1519bdee-ed18-45fe-86c6-4c7fa9668a14",
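parseUUID itself is not part of this diff. Since the partner IDs are compile-time constants, a plausible shape for the helper (an assumption, not the repository's actual implementation) is a parse that fails loudly at startup:

// Hypothetical sketch; assumes uuid.FromString from storj.io/common/uuid.
func parseUUID(s string) uuid.UUID {
	u, err := uuid.FromString(s)
	if err != nil {
		panic(err) // a malformed hard-coded ID should never ship
	}
	return u
}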
@ -5,7 +5,6 @@ package satellitedb

import (
"context"
"fmt"

"storj.io/common/storj"
"storj.io/storj/private/currency"
@ -17,31 +16,6 @@ type compensationDB struct {
db *satelliteDB
}

func (comp *compensationDB) QueryPaidInYear(ctx context.Context, nodeID storj.NodeID, year int) (totalPaid currency.MicroUnit, err error) {
defer mon.Task()(&ctx)(&err)

start := fmt.Sprintf("%04d-01", year)
endExclusive := fmt.Sprintf("%04d-01", year+1)

stmt := comp.db.Rebind(`
SELECT
coalesce(SUM(amount), 0) AS sum_paid
FROM
storagenode_payments
WHERE
node_id = ?
AND
period >= ? AND period < ?
`)

var sumPaid int64
if err := comp.db.DB.QueryRow(ctx, stmt, nodeID, start, endExclusive).Scan(&sumPaid); err != nil {
return currency.Zero, Error.Wrap(err)
}

return currency.NewMicroUnit(sumPaid), nil
}

// QueryWithheldAmounts returns withheld data for the given node.
func (comp *compensationDB) QueryWithheldAmounts(ctx context.Context, nodeID storj.NodeID) (_ compensation.WithheldAmounts, err error) {
defer mon.Task()(&ctx)(&err)

@ -277,9 +277,9 @@ func (dbc *satelliteDBCollection) StripeCoinPayments() stripecoinpayments.DB {
return &stripeCoinPaymentsDB{db: dbc.getByName("stripecoinpayments")}
}

// SnoPayout returns database for storagenode payStubs and payments info.
func (dbc *satelliteDBCollection) SnoPayout() snopayouts.DB {
return &paymentStubs{db: dbc.getByName("snopayouts")}
// SNOPayouts returns database for storagenode payStubs and payments info.
func (dbc *satelliteDBCollection) SNOPayouts() snopayouts.DB {
return &snopayoutsDB{db: dbc.getByName("snopayouts")}
}

// Compensation returns database for storage node compensation.
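The accessor rename propagates to every caller; a one-line sketch of an updated call site, assuming a satellite DB handle named db:

payouts := db.SNOPayouts() // was db.SnoPayout(); still returns snopayouts.DB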
@ -128,7 +128,7 @@ model node (
index (
name nodes_dis_unk_exit_fin_last_success_index
fields disqualified unknown_audit_suspended exit_finished_at last_contact_success
)
)

field id blob
// address is how to contact the node, this can be a hostname or IP and it contains the port
@ -290,6 +290,13 @@ model user (
field partner_id blob ( nullable )
field created_at timestamp ( autoinsert )
field project_limit int ( updatable, default 0 )

field position text ( updatable, nullable )
field company_name text ( updatable, nullable )
field company_size int ( updatable, nullable )
field working_on text ( updatable, nullable )
field is_professional bool ( updatable, default false )

)

create user ( )
@ -771,6 +778,17 @@ model storagenode_paystub (

create storagenode_paystub ( noreturn )

read one (
select storagenode_paystub
where storagenode_paystub.node_id = ?
where storagenode_paystub.period = ?
)

read all (
select storagenode_paystub
where storagenode_paystub.node_id = ?
)

model storagenode_payment (
key id

@ -787,6 +805,19 @@ model storagenode_payment (

create storagenode_payment ( noreturn )

read limitoffset (
select storagenode_payment
where storagenode_payment.node_id = ?
where storagenode_payment.period = ?
orderby desc storagenode_payment.id
)

read all (
select storagenode_payment
where storagenode_payment.node_id = ?
)
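Each dbx read declaration above maps onto a generated method that appears later in this diff: read one on storagenode_paystub becomes Get_StoragenodePaystub_By_NodeId_And_Period, read all becomes All_StoragenodePaystub_By_NodeId (and All_StoragenodePayment_By_NodeId), and read limitoffset becomes Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id. A hedged call-site sketch, assuming dbx-convention field constructors and the "YYYY-MM" period format used elsewhere in this change:

// Sketch only; StoragenodePaystub_NodeId / StoragenodePaystub_Period are assumed dbx constructors.
paystub, err := db.Get_StoragenodePaystub_By_NodeId_And_Period(ctx,
	StoragenodePaystub_NodeId(nodeID.Bytes()),
	StoragenodePaystub_Period("2020-12"))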
//--- peer_identity ---//

model peer_identity (
@ -950,7 +981,6 @@ create user_credit ()

model bucket_metainfo (
key id
unique name project_id //to remove later
unique project_id name

field id blob

@ -684,6 +684,11 @@ CREATE TABLE users (
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
position text,
company_name text,
company_size integer,
working_on text,
is_professional boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
@ -722,7 +727,6 @@ CREATE TABLE bucket_metainfos (
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
@ -1236,6 +1240,11 @@ CREATE TABLE users (
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
position text,
company_name text,
company_size integer,
working_on text,
is_professional boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
@ -1274,7 +1283,6 @@ CREATE TABLE bucket_metainfos (
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
@ -7217,14 +7225,24 @@ type User struct {
PartnerId []byte
CreatedAt time.Time
ProjectLimit int
Position *string
CompanyName *string
CompanySize *int
WorkingOn *string
IsProfessional bool
}

func (User) _Table() string { return "users" }

type User_Create_Fields struct {
ShortName User_ShortName_Field
PartnerId User_PartnerId_Field
ProjectLimit User_ProjectLimit_Field
ShortName User_ShortName_Field
PartnerId User_PartnerId_Field
ProjectLimit User_ProjectLimit_Field
Position User_Position_Field
CompanyName User_CompanyName_Field
CompanySize User_CompanySize_Field
WorkingOn User_WorkingOn_Field
IsProfessional User_IsProfessional_Field
}

type User_Update_Fields struct {
@ -7235,6 +7253,11 @@ type User_Update_Fields struct {
PasswordHash User_PasswordHash_Field
Status User_Status_Field
ProjectLimit User_ProjectLimit_Field
Position User_Position_Field
CompanyName User_CompanyName_Field
CompanySize User_CompanySize_Field
WorkingOn User_WorkingOn_Field
IsProfessional User_IsProfessional_Field
}

type User_Id_Field struct {
@ -7453,6 +7476,153 @@ func (f User_ProjectLimit_Field) value() interface{} {

func (User_ProjectLimit_Field) _Column() string { return "project_limit" }

type User_Position_Field struct {
_set bool
_null bool
_value *string
}

func User_Position(v string) User_Position_Field {
return User_Position_Field{_set: true, _value: &v}
}

func User_Position_Raw(v *string) User_Position_Field {
if v == nil {
return User_Position_Null()
}
return User_Position(*v)
}

func User_Position_Null() User_Position_Field {
return User_Position_Field{_set: true, _null: true}
}

func (f User_Position_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f User_Position_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}

func (User_Position_Field) _Column() string { return "position" }

type User_CompanyName_Field struct {
_set bool
_null bool
_value *string
}

func User_CompanyName(v string) User_CompanyName_Field {
return User_CompanyName_Field{_set: true, _value: &v}
}

func User_CompanyName_Raw(v *string) User_CompanyName_Field {
if v == nil {
return User_CompanyName_Null()
}
return User_CompanyName(*v)
}

func User_CompanyName_Null() User_CompanyName_Field {
return User_CompanyName_Field{_set: true, _null: true}
}

func (f User_CompanyName_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f User_CompanyName_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}

func (User_CompanyName_Field) _Column() string { return "company_name" }

type User_CompanySize_Field struct {
_set bool
_null bool
_value *int
}

func User_CompanySize(v int) User_CompanySize_Field {
return User_CompanySize_Field{_set: true, _value: &v}
}

func User_CompanySize_Raw(v *int) User_CompanySize_Field {
if v == nil {
return User_CompanySize_Null()
}
return User_CompanySize(*v)
}

func User_CompanySize_Null() User_CompanySize_Field {
return User_CompanySize_Field{_set: true, _null: true}
}

func (f User_CompanySize_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f User_CompanySize_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}

func (User_CompanySize_Field) _Column() string { return "company_size" }

type User_WorkingOn_Field struct {
_set bool
_null bool
_value *string
}

func User_WorkingOn(v string) User_WorkingOn_Field {
return User_WorkingOn_Field{_set: true, _value: &v}
}

func User_WorkingOn_Raw(v *string) User_WorkingOn_Field {
if v == nil {
return User_WorkingOn_Null()
}
return User_WorkingOn(*v)
}

func User_WorkingOn_Null() User_WorkingOn_Field {
return User_WorkingOn_Field{_set: true, _null: true}
}

func (f User_WorkingOn_Field) isnull() bool { return !f._set || f._null || f._value == nil }

func (f User_WorkingOn_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}

func (User_WorkingOn_Field) _Column() string { return "working_on" }

type User_IsProfessional_Field struct {
_set bool
_null bool
_value bool
}

func User_IsProfessional(v bool) User_IsProfessional_Field {
return User_IsProfessional_Field{_set: true, _value: v}
}

func (f User_IsProfessional_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}

func (User_IsProfessional_Field) _Column() string { return "is_professional" }
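These generated wrappers distinguish three states for an updatable nullable column: unset (omitted from the SQL entirely), explicitly NULL, and set to a value. A short usage sketch built only from the constructors above:

pos := User_Position("engineer")     // value() yields a *string -> column set
cleared := User_Position_Null()      // value() yields nil -> column set to NULL
var unknown *string                  // nil pointer...
viaRaw := User_Position_Raw(unknown) // ...also maps to NULL via the _Raw form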
type ValueAttribution struct {
ProjectId []byte
BucketName []byte
@ -9357,15 +9527,19 @@ func (obj *pgxImpl) Create_User(ctx context.Context,
__status_val := int(0)
__partner_id_val := optional.PartnerId.value()
__created_at_val := __now
__position_val := optional.Position.value()
__company_name_val := optional.CompanyName.value()
__company_size_val := optional.CompanySize.value()
__working_on_val := optional.WorkingOn.value()

var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at, position, company_name, company_size, working_on")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}}

var __values []interface{}
__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val)
__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val)

__optional_columns := __sqlbundle_Literals{Join: ", "}
__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@ -9376,6 +9550,12 @@ func (obj *pgxImpl) Create_User(ctx context.Context,
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}

if optional.IsProfessional._set {
__values = append(__values, optional.IsProfessional.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("is_professional"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}

if len(__optional_columns.SQLs) == 0 {
if __columns.SQL == nil {
__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
@ -9388,7 +9568,7 @@ func (obj *pgxImpl) Create_User(ctx context.Context,
obj.logStmt(__stmt, __values...)

user = &User{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err != nil {
return nil, obj.makeErr(err)
}
@ -10921,7 +11101,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex
user *User, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")

var __values []interface{}
__values = append(__values, user_normalized_email.value())
@ -10945,7 +11125,7 @@ func (obj *pgxImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(ctx contex
}

user = &User{}
err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err != nil {
return nil, err
}
@ -10979,7 +11159,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context,
user *User, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.id = ?")

var __values []interface{}
__values = append(__values, user_id.value())
@ -10988,7 +11168,7 @@ func (obj *pgxImpl) Get_User_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)

user = &User{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err != nil {
return (*User)(nil), obj.makeErr(err)
}
@ -12208,6 +12388,169 @@ func (obj *pgxImpl) All_StoragenodeStorageTally_By_IntervalEndTime_GreaterOrEqua

}

func (obj *pgxImpl) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
storagenode_paystub_period StoragenodePaystub_Period_Field) (
storagenode_paystub *StoragenodePaystub, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ? AND storagenode_paystubs.period = ?")

var __values []interface{}
__values = append(__values, storagenode_paystub_node_id.value(), storagenode_paystub_period.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

storagenode_paystub = &StoragenodePaystub{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid)
if err != nil {
return (*StoragenodePaystub)(nil), obj.makeErr(err)
}
return storagenode_paystub, nil

}

func (obj *pgxImpl) All_StoragenodePaystub_By_NodeId(ctx context.Context,
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
rows []*StoragenodePaystub, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ?")

var __values []interface{}
__values = append(__values, storagenode_paystub_node_id.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

for {
rows, err = func() (rows []*StoragenodePaystub, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()

for __rows.Next() {
storagenode_paystub := &StoragenodePaystub{}
err = __rows.Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid)
if err != nil {
return nil, err
}
rows = append(rows, storagenode_paystub)
}
if err := __rows.Err(); err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}

}
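All of the generated list methods share this shape: the query runs inside an immediately invoked closure so that __rows.Close() is deferred once per attempt, and the outer loop re-runs the whole read when obj.shouldRetry reports a transient error. Stripped to its skeleton:

for {
	rows, err = func() ([]*StoragenodePaystub, error) { /* query, scan, close */ }()
	if err != nil {
		if obj.shouldRetry(err) {
			continue // transient failure: run the query again
		}
		return nil, obj.makeErr(err)
	}
	return rows, nil
}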
func (obj *pgxImpl) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
storagenode_payment_period StoragenodePayment_Period_Field,
limit int, offset int64) (
rows []*StoragenodePayment, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ? ORDER BY storagenode_payments.id DESC LIMIT ? OFFSET ?")

var __values []interface{}
__values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value())

__values = append(__values, limit, offset)

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

for {
rows, err = func() (rows []*StoragenodePayment, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()

for __rows.Next() {
storagenode_payment := &StoragenodePayment{}
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
if err != nil {
return nil, err
}
rows = append(rows, storagenode_payment)
}
err = __rows.Err()
if err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}

}

func (obj *pgxImpl) All_StoragenodePayment_By_NodeId(ctx context.Context,
storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
rows []*StoragenodePayment, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ?")

var __values []interface{}
__values = append(__values, storagenode_payment_node_id.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

for {
rows, err = func() (rows []*StoragenodePayment, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()

for __rows.Next() {
storagenode_payment := &StoragenodePayment{}
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
if err != nil {
return nil, err
}
rows = append(rows, storagenode_payment)
}
if err := __rows.Err(); err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}

}

func (obj *pgxImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
peer_identity_node_id PeerIdentity_NodeId_Field) (
peer_identity *PeerIdentity, err error) {
@ -13937,7 +14280,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context,
defer mon.Task()(&ctx)(&err)
var __sets = &__sqlbundle_Hole{}

var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}}

__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
@ -13978,6 +14321,31 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_limit = ?"))
}

if update.Position._set {
__values = append(__values, update.Position.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("position = ?"))
}

if update.CompanyName._set {
__values = append(__values, update.CompanyName.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_name = ?"))
}

if update.CompanySize._set {
__values = append(__values, update.CompanySize.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_size = ?"))
}

if update.WorkingOn._set {
__values = append(__values, update.WorkingOn.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("working_on = ?"))
}

if update.IsProfessional._set {
__values = append(__values, update.IsProfessional.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("is_professional = ?"))
}

if len(__sets_sql.SQLs) == 0 {
return nil, emptyUpdate()
}
@ -13991,7 +14359,7 @@ func (obj *pgxImpl) Update_User_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)

user = &User{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err == sql.ErrNoRows {
return nil, nil
}
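Combined with User_Update_Fields, only the fields a caller actually sets reach the generated SET clause. A hedged call-site sketch (User_Id is the dbx-convention constructor for the blob primary key and is not shown in this hunk):

// Updates position, company_size and is_professional; all other columns untouched.
user, err := db.Update_User_By_Id(ctx,
	User_Id(userID.Bytes()),
	User_Update_Fields{
		Position:       User_Position("engineer"),
		CompanySize:    User_CompanySize(250),
		IsProfessional: User_IsProfessional(true),
	})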
@ -16118,15 +16486,19 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context,
__status_val := int(0)
__partner_id_val := optional.PartnerId.value()
__created_at_val := __now
__position_val := optional.Position.value()
__company_name_val := optional.CompanyName.value()
__company_size_val := optional.CompanySize.value()
__working_on_val := optional.WorkingOn.value()

var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, email, normalized_email, full_name, short_name, password_hash, status, partner_id, created_at, position, company_name, company_size, working_on")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO users "), __clause, __sqlbundle_Literal(" RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}}

var __values []interface{}
__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val)
__values = append(__values, __id_val, __email_val, __normalized_email_val, __full_name_val, __short_name_val, __password_hash_val, __status_val, __partner_id_val, __created_at_val, __position_val, __company_name_val, __company_size_val, __working_on_val)

__optional_columns := __sqlbundle_Literals{Join: ", "}
__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@ -16137,6 +16509,12 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context,
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}

if optional.IsProfessional._set {
__values = append(__values, optional.IsProfessional.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("is_professional"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}

if len(__optional_columns.SQLs) == 0 {
if __columns.SQL == nil {
__clause.SQL = __sqlbundle_Literal("DEFAULT VALUES")
@ -16149,7 +16527,7 @@ func (obj *pgxcockroachImpl) Create_User(ctx context.Context,
obj.logStmt(__stmt, __values...)

user = &User{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err != nil {
return nil, obj.makeErr(err)
}
@ -17682,7 +18060,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c
user *User, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.normalized_email = ? AND users.status != 0 LIMIT 2")

var __values []interface{}
__values = append(__values, user_normalized_email.value())
@ -17706,7 +18084,7 @@ func (obj *pgxcockroachImpl) Get_User_By_NormalizedEmail_And_Status_Not_Number(c
}

user = &User{}
err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = __rows.Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err != nil {
return nil, err
}
@ -17740,7 +18118,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context,
user *User, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit FROM users WHERE users.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional FROM users WHERE users.id = ?")

var __values []interface{}
__values = append(__values, user_id.value())
@ -17749,7 +18127,7 @@ func (obj *pgxcockroachImpl) Get_User_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)

user = &User{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
if err != nil {
return (*User)(nil), obj.makeErr(err)
}
@ -18969,6 +19347,169 @@ func (obj *pgxcockroachImpl) All_StoragenodeStorageTally_By_IntervalEndTime_Grea

}

func (obj *pgxcockroachImpl) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
storagenode_paystub_period StoragenodePaystub_Period_Field) (
storagenode_paystub *StoragenodePaystub, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ? AND storagenode_paystubs.period = ?")

var __values []interface{}
__values = append(__values, storagenode_paystub_node_id.value(), storagenode_paystub_period.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

storagenode_paystub = &StoragenodePaystub{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid)
if err != nil {
return (*StoragenodePaystub)(nil), obj.makeErr(err)
}
return storagenode_paystub, nil

}

func (obj *pgxcockroachImpl) All_StoragenodePaystub_By_NodeId(ctx context.Context,
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
rows []*StoragenodePaystub, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_paystubs.period, storagenode_paystubs.node_id, storagenode_paystubs.created_at, storagenode_paystubs.codes, storagenode_paystubs.usage_at_rest, storagenode_paystubs.usage_get, storagenode_paystubs.usage_put, storagenode_paystubs.usage_get_repair, storagenode_paystubs.usage_put_repair, storagenode_paystubs.usage_get_audit, storagenode_paystubs.comp_at_rest, storagenode_paystubs.comp_get, storagenode_paystubs.comp_put, storagenode_paystubs.comp_get_repair, storagenode_paystubs.comp_put_repair, storagenode_paystubs.comp_get_audit, storagenode_paystubs.surge_percent, storagenode_paystubs.held, storagenode_paystubs.owed, storagenode_paystubs.disposed, storagenode_paystubs.paid FROM storagenode_paystubs WHERE storagenode_paystubs.node_id = ?")

var __values []interface{}
__values = append(__values, storagenode_paystub_node_id.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

for {
rows, err = func() (rows []*StoragenodePaystub, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()

for __rows.Next() {
storagenode_paystub := &StoragenodePaystub{}
err = __rows.Scan(&storagenode_paystub.Period, &storagenode_paystub.NodeId, &storagenode_paystub.CreatedAt, &storagenode_paystub.Codes, &storagenode_paystub.UsageAtRest, &storagenode_paystub.UsageGet, &storagenode_paystub.UsagePut, &storagenode_paystub.UsageGetRepair, &storagenode_paystub.UsagePutRepair, &storagenode_paystub.UsageGetAudit, &storagenode_paystub.CompAtRest, &storagenode_paystub.CompGet, &storagenode_paystub.CompPut, &storagenode_paystub.CompGetRepair, &storagenode_paystub.CompPutRepair, &storagenode_paystub.CompGetAudit, &storagenode_paystub.SurgePercent, &storagenode_paystub.Held, &storagenode_paystub.Owed, &storagenode_paystub.Disposed, &storagenode_paystub.Paid)
if err != nil {
return nil, err
}
rows = append(rows, storagenode_paystub)
}
if err := __rows.Err(); err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}

}

func (obj *pgxcockroachImpl) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
storagenode_payment_period StoragenodePayment_Period_Field,
limit int, offset int64) (
rows []*StoragenodePayment, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ? AND storagenode_payments.period = ? ORDER BY storagenode_payments.id DESC LIMIT ? OFFSET ?")

var __values []interface{}
__values = append(__values, storagenode_payment_node_id.value(), storagenode_payment_period.value())

__values = append(__values, limit, offset)

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

for {
rows, err = func() (rows []*StoragenodePayment, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()

for __rows.Next() {
storagenode_payment := &StoragenodePayment{}
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
if err != nil {
return nil, err
}
rows = append(rows, storagenode_payment)
}
err = __rows.Err()
if err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}

}

func (obj *pgxcockroachImpl) All_StoragenodePayment_By_NodeId(ctx context.Context,
storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
rows []*StoragenodePayment, err error) {
defer mon.Task()(&ctx)(&err)

var __embed_stmt = __sqlbundle_Literal("SELECT storagenode_payments.id, storagenode_payments.created_at, storagenode_payments.node_id, storagenode_payments.period, storagenode_payments.amount, storagenode_payments.receipt, storagenode_payments.notes FROM storagenode_payments WHERE storagenode_payments.node_id = ?")

var __values []interface{}
__values = append(__values, storagenode_payment_node_id.value())

var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)

for {
rows, err = func() (rows []*StoragenodePayment, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()

for __rows.Next() {
storagenode_payment := &StoragenodePayment{}
err = __rows.Scan(&storagenode_payment.Id, &storagenode_payment.CreatedAt, &storagenode_payment.NodeId, &storagenode_payment.Period, &storagenode_payment.Amount, &storagenode_payment.Receipt, &storagenode_payment.Notes)
if err != nil {
return nil, err
}
rows = append(rows, storagenode_payment)
}
if err := __rows.Err(); err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}

}

func (obj *pgxcockroachImpl) Get_PeerIdentity_By_NodeId(ctx context.Context,
|
||||
peer_identity_node_id PeerIdentity_NodeId_Field) (
|
||||
peer_identity *PeerIdentity, err error) {
|
||||
@ -20698,7 +21239,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context,
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
var __sets = &__sqlbundle_Hole{}
|
||||
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit")}}
|
||||
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE users SET "), __sets, __sqlbundle_Literal(" WHERE users.id = ? RETURNING users.id, users.email, users.normalized_email, users.full_name, users.short_name, users.password_hash, users.status, users.partner_id, users.created_at, users.project_limit, users.position, users.company_name, users.company_size, users.working_on, users.is_professional")}}
|
||||
|
||||
__sets_sql := __sqlbundle_Literals{Join: ", "}
|
||||
var __values []interface{}
|
||||
@ -20739,6 +21280,31 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context,
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("project_limit = ?"))
|
||||
}
|
||||
|
||||
if update.Position._set {
|
||||
__values = append(__values, update.Position.value())
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("position = ?"))
|
||||
}
|
||||
|
||||
if update.CompanyName._set {
|
||||
__values = append(__values, update.CompanyName.value())
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_name = ?"))
|
||||
}
|
||||
|
||||
if update.CompanySize._set {
|
||||
__values = append(__values, update.CompanySize.value())
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("company_size = ?"))
|
||||
}
|
||||
|
||||
if update.WorkingOn._set {
|
||||
__values = append(__values, update.WorkingOn.value())
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("working_on = ?"))
|
||||
}
|
||||
|
||||
if update.IsProfessional._set {
|
||||
__values = append(__values, update.IsProfessional.value())
|
||||
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("is_professional = ?"))
|
||||
}
|
||||
|
||||
if len(__sets_sql.SQLs) == 0 {
|
||||
return nil, emptyUpdate()
|
||||
}
|
||||
@ -20752,7 +21318,7 @@ func (obj *pgxcockroachImpl) Update_User_By_Id(ctx context.Context,
|
||||
obj.logStmt(__stmt, __values...)
|
||||
|
||||
user = &User{}
|
||||
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit)
|
||||
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&user.Id, &user.Email, &user.NormalizedEmail, &user.FullName, &user.ShortName, &user.PasswordHash, &user.Status, &user.PartnerId, &user.CreatedAt, &user.ProjectLimit, &user.Position, &user.CompanyName, &user.CompanySize, &user.WorkingOn, &user.IsProfessional)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
@ -22691,6 +23257,26 @@ func (rx *Rx) All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(
|
||||
return tx.All_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start)
|
||||
}
|
||||
|
||||
func (rx *Rx) All_StoragenodePayment_By_NodeId(ctx context.Context,
|
||||
storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
|
||||
rows []*StoragenodePayment, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.All_StoragenodePayment_By_NodeId(ctx, storagenode_payment_node_id)
|
||||
}
|
||||
|
||||
func (rx *Rx) All_StoragenodePaystub_By_NodeId(ctx context.Context,
|
||||
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
|
||||
rows []*StoragenodePaystub, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.All_StoragenodePaystub_By_NodeId(ctx, storagenode_paystub_node_id)
|
||||
}
|
||||
|
||||
func (rx *Rx) All_StoragenodeStorageTally(ctx context.Context) (
|
||||
rows []*StoragenodeStorageTally, err error) {
|
||||
var tx *Tx
|
||||
@ -23772,6 +24358,17 @@ func (rx *Rx) Get_SerialNumber_BucketId_By_SerialNumber(ctx context.Context,
|
||||
return tx.Get_SerialNumber_BucketId_By_SerialNumber(ctx, serial_number_serial_number)
|
||||
}
|
||||
|
||||
func (rx *Rx) Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
|
||||
storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
|
||||
storagenode_paystub_period StoragenodePaystub_Period_Field) (
|
||||
storagenode_paystub *StoragenodePaystub, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Get_StoragenodePaystub_By_NodeId_And_Period(ctx, storagenode_paystub_node_id, storagenode_paystub_period)
|
||||
}
|
||||
|
||||
func (rx *Rx) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
|
||||
stripe_customer_user_id StripeCustomer_UserId_Field) (
|
||||
row *CustomerId_Row, err error) {
|
||||
@ -23992,6 +24589,18 @@ func (rx *Rx) Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx contex
|
||||
return tx.Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less, limit, offset)
|
||||
}
|
||||
|
||||
func (rx *Rx) Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
|
||||
storagenode_payment_node_id StoragenodePayment_NodeId_Field,
|
||||
storagenode_payment_period StoragenodePayment_Period_Field,
|
||||
limit int, offset int64) (
|
||||
rows []*StoragenodePayment, err error) {
|
||||
var tx *Tx
|
||||
if tx, err = rx.getTx(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
return tx.Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx, storagenode_payment_node_id, storagenode_payment_period, limit, offset)
|
||||
}
|
||||
|
||||
func (rx *Rx) Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
|
||||
stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
|
||||
limit int, offset int64) (
|
||||
@ -24405,6 +25014,14 @@ type Methods interface {
        storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field) (
        rows []*StoragenodeBandwidthRollup, err error)

    All_StoragenodePayment_By_NodeId(ctx context.Context,
        storagenode_payment_node_id StoragenodePayment_NodeId_Field) (
        rows []*StoragenodePayment, err error)

    All_StoragenodePaystub_By_NodeId(ctx context.Context,
        storagenode_paystub_node_id StoragenodePaystub_NodeId_Field) (
        rows []*StoragenodePaystub, err error)

    All_StoragenodeStorageTally(ctx context.Context) (
        rows []*StoragenodeStorageTally, err error)

@ -24913,6 +25530,11 @@ type Methods interface {
        serial_number_serial_number SerialNumber_SerialNumber_Field) (
        row *BucketId_Row, err error)

    Get_StoragenodePaystub_By_NodeId_And_Period(ctx context.Context,
        storagenode_paystub_node_id StoragenodePaystub_NodeId_Field,
        storagenode_paystub_period StoragenodePaystub_Period_Field) (
        storagenode_paystub *StoragenodePaystub, err error)

    Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
        stripe_customer_user_id StripeCustomer_UserId_Field) (
        row *CustomerId_Row, err error)
@ -25013,6 +25635,12 @@ type Methods interface {
        limit int, offset int64) (
        rows []*Project, err error)

    Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx context.Context,
        storagenode_payment_node_id StoragenodePayment_NodeId_Field,
        storagenode_payment_period StoragenodePayment_Period_Field,
        limit int, offset int64) (
        rows []*StoragenodePayment, err error)

    Limited_StripeCustomer_By_CreatedAt_LessOrEqual_OrderBy_Desc_CreatedAt(ctx context.Context,
        stripe_customer_created_at_less_or_equal StripeCustomer_CreatedAt_Field,
        limit int, offset int64) (
@ -364,6 +364,11 @@ CREATE TABLE users (
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    project_limit integer NOT NULL DEFAULT 0,
    position text,
    company_name text,
    company_size integer,
    working_on text,
    is_professional boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
@ -402,7 +407,6 @@ CREATE TABLE bucket_metainfos (
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( name, project_id ),
    UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
@ -364,6 +364,11 @@ CREATE TABLE users (
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    project_limit integer NOT NULL DEFAULT 0,
    position text,
    company_name text,
    company_size integer,
    working_on text,
    is_professional boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
@ -402,7 +407,6 @@ CREATE TABLE bucket_metainfos (
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( name, project_id ),
    UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
@ -1176,6 +1176,42 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
                `ALTER TABLE nodes ALTER COLUMN total_uptime_count SET DEFAULT 0;`,
            },
        },
        {
            DB:          &db.migrationDB,
            Description: "add columns for professional users",
            Version:     140,
            Action: migrate.SQL{
                `ALTER TABLE users ADD COLUMN position text;`,
                `ALTER TABLE users ADD COLUMN company_name text;`,
                `ALTER TABLE users ADD COLUMN working_on text;`,
                `ALTER TABLE users ADD COLUMN company_size int;`,
                `ALTER TABLE users ADD COLUMN is_professional boolean NOT NULL DEFAULT false;`,
            },
        },
        {
            DB:          &db.migrationDB,
            Description: "drop the obsolete (name, project_id) index from bucket_metainfos table.",
            Version:     141,
            Action: migrate.Func(func(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error {
                if _, ok := db.Driver().(*cockroachutil.Driver); ok {
                    _, err := db.Exec(ctx,
                        `DROP INDEX bucket_metainfos_name_project_id_key CASCADE;`,
                    )
                    if err != nil {
                        return ErrMigrate.Wrap(err)
                    }
                    return nil
                }

                _, err := db.Exec(ctx,
                    `ALTER TABLE bucket_metainfos DROP CONSTRAINT bucket_metainfos_name_project_id_key;`,
                )
                if err != nil {
                    return ErrMigrate.Wrap(err)
                }
                return nil
            }),
        },
    }
}
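Migration 141 has to branch on the driver because CockroachDB implements a UNIQUE table constraint as an index, while PostgreSQL exposes it as a constraint on the table. A minimal, self-contained sketch of the same decision using only database/sql (the isCockroach flag is a stand-in for the *cockroachutil.Driver type assertion above):

    // Sketch: dropping the redundant (name, project_id) uniqueness portably.
    func dropDuplicateBucketNameKey(ctx context.Context, db *sql.DB, isCockroach bool) error {
        if isCockroach {
            // CockroachDB: the unique constraint is backed by an index,
            // so it must be removed with DROP INDEX ... CASCADE.
            _, err := db.ExecContext(ctx, `DROP INDEX bucket_metainfos_name_project_id_key CASCADE;`)
            return err
        }
        // PostgreSQL: drop it as an ordinary table constraint.
        _, err := db.ExecContext(ctx, `ALTER TABLE bucket_metainfos DROP CONSTRAINT bucket_metainfos_name_project_id_key;`)
        return err
    }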
@ -118,6 +118,62 @@ func (cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, sele
    return reputableNodes, newNodes, Error.Wrap(rows.Err())
}

// SelectAllStorageNodesDownload returns all nodes that qualify to store data.
func (cache *overlaycache) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (nodes []*overlay.SelectedNode, err error) {
    for {
        nodes, err = cache.selectAllStorageNodesDownload(ctx, onlineWindow, asOf)
        if err != nil {
            if cockroachutil.NeedsRetry(err) {
                continue
            }
            return nodes, err
        }
        break
    }

    return nodes, err
}

func (cache *overlaycache) selectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOfConfig overlay.AsOfSystemTimeConfig) (_ []*overlay.SelectedNode, err error) {
    defer mon.Task()(&ctx)(&err)

    asOf := cache.db.AsOfSystemTimeClause(asOfConfig.DefaultInterval)

    query := `
        SELECT id, address, last_net, last_ip_port
        FROM nodes ` + asOf + `
        WHERE disqualified IS NULL
        AND exit_finished_at IS NULL
        AND last_contact_success > $1
    `
    args := []interface{}{
        // $1
        time.Now().Add(-onlineWindow),
    }

    rows, err := cache.db.Query(ctx, query, args...)
    if err != nil {
        return nil, err
    }
    defer func() { err = errs.Combine(err, rows.Close()) }()

    var nodes []*overlay.SelectedNode
    for rows.Next() {
        var node overlay.SelectedNode
        node.Address = &pb.NodeAddress{}
        var lastIPPort sql.NullString
        err = rows.Scan(&node.ID, &node.Address.Address, &node.LastNet, &lastIPPort)
        if err != nil {
            return nil, err
        }
        if lastIPPort.Valid {
            node.LastIPPort = lastIPPort.String
        }
        nodes = append(nodes, &node)
    }
    return nodes, Error.Wrap(rows.Err())
}

// GetNodesNetwork returns the /24 subnet for each storage node; order is not guaranteed.
func (cache *overlaycache) GetNodesNetwork(ctx context.Context, nodeIDs []storj.NodeID) (nodeNets []string, err error) {
    for {
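SelectAllStorageNodesDownload wraps its query in a retry loop so that transient CockroachDB serialization failures are retried instead of surfaced to the caller. The same pattern, factored out as a generic helper (a sketch; needsRetry stands in for cockroachutil.NeedsRetry):

    // Sketch: retry an operation while its error is classified as transient.
    func withRetry(ctx context.Context, needsRetry func(error) bool, op func(context.Context) error) error {
        for {
            if err := op(ctx); err != nil {
                if needsRetry(err) {
                    continue // transient; run the operation again
                }
                return err // permanent failure
            }
            return nil
        }
    }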
@ -8,114 +8,155 @@ import (
    "database/sql"
    "errors"

    "github.com/zeebo/errs"

    "storj.io/common/storj"
    "storj.io/storj/satellite/satellitedb/dbx"
    "storj.io/storj/satellite/snopayouts"
)

// paymentStubs is payment data for specific storagenode for some specific period by working with satellite.
// snopayoutsDB holds payment data for a specific storage node for a specific period, as recorded by the satellite.
//
// architecture: Database
type paymentStubs struct {
type snopayoutsDB struct {
    db *satelliteDB
}

// GetPaystub returns payStub by nodeID and period.
func (paystubs *paymentStubs) GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (payStub snopayouts.PayStub, err error) {
    query := `SELECT * FROM storagenode_paystubs WHERE node_id = $1 AND period = $2;`

    row := paystubs.db.QueryRowContext(ctx, query, nodeID, period)
    err = row.Scan(
        &payStub.Period,
        &payStub.NodeID,
        &payStub.Created,
        &payStub.Codes,
        &payStub.UsageAtRest,
        &payStub.UsageGet,
        &payStub.UsagePut,
        &payStub.UsageGetRepair,
        &payStub.UsagePutRepair,
        &payStub.UsageGetAudit,
        &payStub.CompAtRest,
        &payStub.CompGet,
        &payStub.CompPut,
        &payStub.CompGetRepair,
        &payStub.CompPutRepair,
        &payStub.CompGetAudit,
        &payStub.SurgePercent,
        &payStub.Held,
        &payStub.Owed,
        &payStub.Disposed,
        &payStub.Paid,
    )
func (db *snopayoutsDB) GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (paystub snopayouts.Paystub, err error) {
    dbxPaystub, err := db.db.Get_StoragenodePaystub_By_NodeId_And_Period(ctx,
        dbx.StoragenodePaystub_NodeId(nodeID.Bytes()),
        dbx.StoragenodePaystub_Period(period))
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return snopayouts.PayStub{}, snopayouts.ErrNoDataForPeriod.Wrap(err)
            return snopayouts.Paystub{}, snopayouts.ErrNoDataForPeriod.Wrap(err)
        }

        return snopayouts.PayStub{}, Error.Wrap(err)
        return snopayouts.Paystub{}, Error.Wrap(err)
    }

    return payStub, nil
    return convertDBXPaystub(dbxPaystub)
}

// GetAllPaystubs returns all paystubs for nodeID.
func (paystubs *paymentStubs) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) (payStubs []snopayouts.PayStub, err error) {
    query := `SELECT * FROM storagenode_paystubs WHERE node_id = $1;`

    rows, err := paystubs.db.QueryContext(ctx, query, nodeID)
func (db *snopayoutsDB) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) (paystubs []snopayouts.Paystub, err error) {
    dbxPaystubs, err := db.db.All_StoragenodePaystub_By_NodeId(ctx,
        dbx.StoragenodePaystub_NodeId(nodeID.Bytes()))
    if err != nil {
        return []snopayouts.PayStub{}, Error.Wrap(err)
        return nil, Error.Wrap(err)
    }

    defer func() {
        err = errs.Combine(err, Error.Wrap(rows.Close()))
    }()

    for rows.Next() {
        paystub := snopayouts.PayStub{}

        err = rows.Scan(
            &paystub.Period,
            &paystub.NodeID,
            &paystub.Created,
            &paystub.Codes,
            &paystub.UsageAtRest,
            &paystub.UsageGet,
            &paystub.UsagePut,
            &paystub.UsageGetRepair,
            &paystub.UsagePutRepair,
            &paystub.UsageGetAudit,
            &paystub.CompAtRest,
            &paystub.CompGet,
            &paystub.CompPut,
            &paystub.CompGetRepair,
            &paystub.CompPutRepair,
            &paystub.CompGetAudit,
            &paystub.SurgePercent,
            &paystub.Held,
            &paystub.Owed,
            &paystub.Disposed,
            &paystub.Paid,
        )
        if err = rows.Err(); err != nil {
            return []snopayouts.PayStub{}, Error.Wrap(err)
    for _, dbxPaystub := range dbxPaystubs {
        payStub, err := convertDBXPaystub(dbxPaystub)
        if err != nil {
            return nil, Error.Wrap(err)
        }

        payStubs = append(payStubs, paystub)
        paystubs = append(paystubs, payStub)
    }

    return payStubs, Error.Wrap(rows.Err())
    return paystubs, nil
}

// CreatePaystub inserts storagenode_paystub into database.
func (paystubs *paymentStubs) CreatePaystub(ctx context.Context, stub snopayouts.PayStub) (err error) {
    return paystubs.db.CreateNoReturn_StoragenodePaystub(
        ctx,
func convertDBXPaystub(dbxPaystub *dbx.StoragenodePaystub) (snopayouts.Paystub, error) {
    nodeID, err := storj.NodeIDFromBytes(dbxPaystub.NodeId)
    if err != nil {
        return snopayouts.Paystub{}, Error.Wrap(err)
    }
    return snopayouts.Paystub{
        Period:         dbxPaystub.Period,
        NodeID:         nodeID,
        Created:        dbxPaystub.CreatedAt,
        Codes:          dbxPaystub.Codes,
        UsageAtRest:    dbxPaystub.UsageAtRest,
        UsageGet:       dbxPaystub.UsageGet,
        UsagePut:       dbxPaystub.UsagePut,
        UsageGetRepair: dbxPaystub.UsageGetRepair,
        UsagePutRepair: dbxPaystub.UsagePutRepair,
        UsageGetAudit:  dbxPaystub.UsageGetAudit,
        CompAtRest:     dbxPaystub.CompAtRest,
        CompGet:        dbxPaystub.CompGet,
        CompPut:        dbxPaystub.CompPut,
        CompGetRepair:  dbxPaystub.CompGetRepair,
        CompPutRepair:  dbxPaystub.CompPutRepair,
        CompGetAudit:   dbxPaystub.CompGetAudit,
        SurgePercent:   dbxPaystub.SurgePercent,
        Held:           dbxPaystub.Held,
        Owed:           dbxPaystub.Owed,
        Disposed:       dbxPaystub.Disposed,
        Paid:           dbxPaystub.Paid,
    }, nil
}

// GetPayment returns payment by nodeID and period.
func (db *snopayoutsDB) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (payment snopayouts.Payment, err error) {
    // N.B. There can be multiple payments for a single node id and period, but the old query
    // here did not take that into account. Neither do any of the layers above, from the
    // service endpoints through the protobuf RPCs to the node client side. Instead of fixing
    // all of those things now, emulate the old behavior with dbx as closely as possible.

    dbxPayments, err := db.db.Limited_StoragenodePayment_By_NodeId_And_Period_OrderBy_Desc_Id(ctx,
        dbx.StoragenodePayment_NodeId(nodeID.Bytes()),
        dbx.StoragenodePayment_Period(period),
        1, 0)
    if err != nil {
        return snopayouts.Payment{}, Error.Wrap(err)
    }

    switch len(dbxPayments) {
    case 0:
        return snopayouts.Payment{}, snopayouts.ErrNoDataForPeriod.Wrap(sql.ErrNoRows)
    case 1:
        return convertDBXPayment(dbxPayments[0])
    default:
        return snopayouts.Payment{}, Error.New("impossible number of rows returned: %d", len(dbxPayments))
    }
}

// GetAllPayments returns all payments for nodeID.
func (db *snopayoutsDB) GetAllPayments(ctx context.Context, nodeID storj.NodeID) (payments []snopayouts.Payment, err error) {
    dbxPayments, err := db.db.All_StoragenodePayment_By_NodeId(ctx,
        dbx.StoragenodePayment_NodeId(nodeID.Bytes()))
    if err != nil {
        return nil, Error.Wrap(err)
    }

    for _, dbxPayment := range dbxPayments {
        payment, err := convertDBXPayment(dbxPayment)
        if err != nil {
            return nil, Error.Wrap(err)
        }
        payments = append(payments, payment)
    }

    return payments, nil
}

func convertDBXPayment(dbxPayment *dbx.StoragenodePayment) (snopayouts.Payment, error) {
    nodeID, err := storj.NodeIDFromBytes(dbxPayment.NodeId)
    if err != nil {
        return snopayouts.Payment{}, Error.Wrap(err)
    }
    return snopayouts.Payment{
        ID:      dbxPayment.Id,
        Created: dbxPayment.CreatedAt,
        NodeID:  nodeID,
        Period:  dbxPayment.Period,
        Amount:  dbxPayment.Amount,
        Receipt: derefStringOr(dbxPayment.Receipt, ""),
        Notes:   derefStringOr(dbxPayment.Notes, ""),
    }, nil
}

func derefStringOr(v *string, def string) string {
    if v != nil {
        return *v
    }
    return def
}

//
// test helpers
//

// TestCreatePaystub inserts storagenode_paystub into database. Only used for tests.
func (db *snopayoutsDB) TestCreatePaystub(ctx context.Context, stub snopayouts.Paystub) (err error) {
    return db.db.CreateNoReturn_StoragenodePaystub(ctx,
        dbx.StoragenodePaystub_Period(stub.Period),
        dbx.StoragenodePaystub_NodeId(stub.NodeID[:]),
        dbx.StoragenodePaystub_NodeId(stub.NodeID.Bytes()),
        dbx.StoragenodePaystub_Codes(stub.Codes),
        dbx.StoragenodePaystub_UsageAtRest(stub.UsageAtRest),
        dbx.StoragenodePaystub_UsageGet(stub.UsageGet),
@ -137,36 +178,10 @@ func (paystubs *paymentStubs) CreatePaystub(ctx context.Context, stub snopayouts
    )
}

// GetPayment returns payment by nodeID and period.
func (paystubs *paymentStubs) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (payment snopayouts.StoragenodePayment, err error) {
    query := `SELECT * FROM storagenode_payments WHERE node_id = $1 AND period = $2;`

    row := paystubs.db.QueryRowContext(ctx, query, nodeID, period)
    err = row.Scan(
        &payment.ID,
        &payment.Created,
        &payment.NodeID,
        &payment.Period,
        &payment.Amount,
        &payment.Receipt,
        &payment.Notes,
    )
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return snopayouts.StoragenodePayment{}, snopayouts.ErrNoDataForPeriod.Wrap(err)
        }

        return snopayouts.StoragenodePayment{}, Error.Wrap(err)
    }

    return payment, nil
}

// CreatePayment inserts storagenode_payment into database.
func (paystubs *paymentStubs) CreatePayment(ctx context.Context, payment snopayouts.StoragenodePayment) (err error) {
    return paystubs.db.CreateNoReturn_StoragenodePayment(
        ctx,
        dbx.StoragenodePayment_NodeId(payment.NodeID[:]),
// TestCreatePayment inserts storagenode_payment into database. Only used for tests.
func (db *snopayoutsDB) TestCreatePayment(ctx context.Context, payment snopayouts.Payment) (err error) {
    return db.db.CreateNoReturn_StoragenodePayment(ctx,
        dbx.StoragenodePayment_NodeId(payment.NodeID.Bytes()),
        dbx.StoragenodePayment_Period(payment.Period),
        dbx.StoragenodePayment_Amount(payment.Amount),
        dbx.StoragenodePayment_Create_Fields{
@ -175,39 +190,3 @@ func (paystubs *paymentStubs) CreatePayment(ctx context.Context, payment snopayo
        },
    )
}

// GetAllPayments return all payments by nodeID.
func (paystubs *paymentStubs) GetAllPayments(ctx context.Context, nodeID storj.NodeID) (payments []snopayouts.StoragenodePayment, err error) {
    query := `SELECT * FROM storagenode_payments WHERE node_id = $1;`

    rows, err := paystubs.db.QueryContext(ctx, query, nodeID)
    if err != nil {
        return nil, Error.Wrap(err)
    }

    defer func() {
        err = errs.Combine(err, Error.Wrap(rows.Close()))
    }()

    for rows.Next() {
        payment := snopayouts.StoragenodePayment{}

        err = rows.Scan(
            &payment.ID,
            &payment.Created,
            &payment.NodeID,
            &payment.Period,
            &payment.Amount,
            &payment.Receipt,
            &payment.Notes,
        )

        if err = rows.Err(); err != nil {
            return nil, Error.Wrap(err)
        }

        payments = append(payments, payment)
    }

    return payments, Error.Wrap(rows.Err())
}
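Because storagenode_payments can hold several rows per (node_id, period), the rewritten GetPayment asks dbx for at most one row ordered by id descending, which reproduces the old single-row behavior while picking the newest payment deterministically. A usage sketch (the snoPayoutsDB variable is hypothetical wiring; the error classification matches the code above):

    // Sketch: reading the most recent payment for a node and period.
    payment, err := snoPayoutsDB.GetPayment(ctx, nodeID, "2020-12")
    switch {
    case snopayouts.ErrNoDataForPeriod.Has(err):
        // nothing recorded for this node in this period
    case err != nil:
        // real database failure
    default:
        _ = payment.Amount // most recent payment row (highest id)
    }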
@ -203,10 +203,34 @@ func (projects *projects) List(ctx context.Context, offset int64, limit int, bef
}

// ListByOwnerID is a method for querying all projects from the database by ownerID. It also includes the number of members for each project.
func (projects *projects) ListByOwnerID(ctx context.Context, ownerID uuid.UUID, limit int, offset int64) (_ console.ProjectsPage, err error) {
// cursor.Limit is set to 50 if it exceeds 50.
func (projects *projects) ListByOwnerID(ctx context.Context, ownerID uuid.UUID, cursor console.ProjectsCursor) (_ console.ProjectsPage, err error) {
    defer mon.Task()(&ctx)(&err)

    var page console.ProjectsPage
    if cursor.Limit > 50 {
        cursor.Limit = 50
    }
    if cursor.Page == 0 {
        return console.ProjectsPage{}, errs.New("page can not be 0")
    }

    page := console.ProjectsPage{
        CurrentPage: cursor.Page,
        Limit:       cursor.Limit,
        Offset:      int64((cursor.Page - 1) * cursor.Limit),
    }

    countRow := projects.sdb.QueryRowContext(ctx, projects.sdb.Rebind(`
        SELECT COUNT(*) FROM projects WHERE owner_id = ?
    `), ownerID)
    err = countRow.Scan(&page.TotalCount)
    if err != nil {
        return console.ProjectsPage{}, err
    }
    page.PageCount = int(page.TotalCount / int64(cursor.Limit))
    if page.TotalCount%int64(cursor.Limit) != 0 {
        page.PageCount++
    }

    rows, err := projects.sdb.Query(ctx, projects.sdb.Rebind(`
        SELECT id, name, description, owner_id, rate_limit, max_buckets, created_at,
@ -216,20 +240,20 @@ func (projects *projects) ListByOwnerID(ctx context.Context, ownerID uuid.UUID,
        ORDER BY name ASC
        OFFSET ? ROWS
        LIMIT ?
    `), ownerID, offset, limit+1) // add 1 to limit to see if there is another page
    `), ownerID, page.Offset, page.Limit+1) // add 1 to limit to see if there is another page
    if err != nil {
        return console.ProjectsPage{}, err
    }
    defer func() { err = errs.Combine(err, rows.Close()) }()

    count := 0
    projectsToSend := make([]console.Project, 0, limit)
    projectsToSend := make([]console.Project, 0, page.Limit)
    for rows.Next() {
        count++
        if count == limit+1 {
        if count == page.Limit+1 {
            // we are done with this page; do not include this project
            page.Next = true
            page.NextOffset = offset + int64(limit)
            page.NextOffset = page.Offset + int64(page.Limit)
            break
        }
        var rateLimit, maxBuckets sql.NullInt32
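The rewritten ListByOwnerID derives its offset and page count from the cursor instead of taking them from the caller. The arithmetic in isolation (values are illustrative):

    // Sketch: cursor math from ListByOwnerID.
    limit, page := 20, 3
    offset := int64((page - 1) * limit) // 40: rows on pages 1 and 2 are skipped
    totalCount := int64(45)
    pageCount := int(totalCount / int64(limit)) // 2 full pages
    if totalCount%int64(limit) != 0 {
        pageCount++ // plus a partial third page
    }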
@ -32,13 +32,10 @@ func (db *StoragenodeAccounting) SaveTallies(ctx context.Context, latestTally ti
    }
    var nodeIDs []storj.NodeID
    var totals []float64
    var totalSum float64
    for id, total := range nodeData {
        nodeIDs = append(nodeIDs, id)
        totals = append(totals, total)
        totalSum += total
    }
    mon.IntVal("nodetallies.totalsum").Observe(int64(totalSum)) //mon:locked

    err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
        _, err = tx.Tx.ExecContext(ctx, db.db.Rebind(`
@ -568,4 +568,4 @@ INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_

-- NEW DATA --

INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
576
satellite/satellitedb/testdata/postgres.v140.sql
vendored
Normal file
@ -0,0 +1,576 @@
-- AUTOGENERATED BY storj.io/dbx
|
||||
-- DO NOT EDIT
|
||||
CREATE TABLE accounting_rollups (
|
||||
node_id bytea NOT NULL,
|
||||
start_time timestamp with time zone NOT NULL,
|
||||
put_total bigint NOT NULL,
|
||||
get_total bigint NOT NULL,
|
||||
get_audit_total bigint NOT NULL,
|
||||
get_repair_total bigint NOT NULL,
|
||||
put_repair_total bigint NOT NULL,
|
||||
at_rest_total double precision NOT NULL,
|
||||
PRIMARY KEY ( node_id, start_time )
|
||||
);
|
||||
CREATE TABLE accounting_timestamps (
|
||||
name text NOT NULL,
|
||||
value timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( name )
|
||||
);
|
||||
CREATE TABLE audit_histories (
|
||||
node_id bytea NOT NULL,
|
||||
history bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollups (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
remote bigint NOT NULL,
|
||||
remote_segments_count integer NOT NULL,
|
||||
inline_segments_count integer NOT NULL,
|
||||
object_count integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||
);
|
||||
CREATE TABLE coinpayments_transactions (
|
||||
id text NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
address text NOT NULL,
|
||||
amount bytea NOT NULL,
|
||||
received bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
key text NOT NULL,
|
||||
timeout integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE consumed_serials (
|
||||
storage_node_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, serial_number )
|
||||
);
|
||||
CREATE TABLE coupons (
|
||||
id bytea NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
description text NOT NULL,
|
||||
type integer NOT NULL,
|
||||
status integer NOT NULL,
|
||||
duration bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE coupon_usages (
|
||||
coupon_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
status integer NOT NULL,
|
||||
period timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( coupon_id, period )
|
||||
);
|
||||
CREATE TABLE graceful_exit_progress (
|
||||
node_id bytea NOT NULL,
|
||||
bytes_transferred bigint NOT NULL,
|
||||
pieces_transferred bigint NOT NULL DEFAULT 0,
|
||||
pieces_failed bigint NOT NULL DEFAULT 0,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE graceful_exit_transfer_queue (
|
||||
node_id bytea NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
piece_num integer NOT NULL,
|
||||
root_piece_id bytea,
|
||||
durability_ratio double precision NOT NULL,
|
||||
queued_at timestamp with time zone NOT NULL,
|
||||
requested_at timestamp with time zone,
|
||||
last_failed_at timestamp with time zone,
|
||||
last_failed_code integer,
|
||||
failed_count integer,
|
||||
finished_at timestamp with time zone,
|
||||
order_limit_send_count integer NOT NULL DEFAULT 0,
|
||||
PRIMARY KEY ( node_id, path, piece_num )
|
||||
);
|
||||
CREATE TABLE injuredsegments (
|
||||
path bytea NOT NULL,
|
||||
data bytea NOT NULL,
|
||||
attempted timestamp with time zone,
|
||||
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
segment_health double precision NOT NULL DEFAULT 1,
|
||||
PRIMARY KEY ( path )
|
||||
);
|
||||
CREATE TABLE irreparabledbs (
|
||||
segmentpath bytea NOT NULL,
|
||||
segmentdetail bytea NOT NULL,
|
||||
pieces_lost_count bigint NOT NULL,
|
||||
seg_damaged_unix_sec bigint NOT NULL,
|
||||
repair_attempt_count bigint NOT NULL,
|
||||
PRIMARY KEY ( segmentpath )
|
||||
);
|
||||
CREATE TABLE nodes (
|
||||
id bytea NOT NULL,
|
||||
address text NOT NULL DEFAULT '',
|
||||
last_net text NOT NULL,
|
||||
last_ip_port text,
|
||||
protocol integer NOT NULL DEFAULT 0,
|
||||
type integer NOT NULL DEFAULT 0,
|
||||
email text NOT NULL,
|
||||
wallet text NOT NULL,
|
||||
free_disk bigint NOT NULL DEFAULT -1,
|
||||
piece_count bigint NOT NULL DEFAULT 0,
|
||||
major bigint NOT NULL DEFAULT 0,
|
||||
minor bigint NOT NULL DEFAULT 0,
|
||||
patch bigint NOT NULL DEFAULT 0,
|
||||
hash text NOT NULL DEFAULT '',
|
||||
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
|
||||
release boolean NOT NULL DEFAULT false,
|
||||
latency_90 bigint NOT NULL DEFAULT 0,
|
||||
audit_success_count bigint NOT NULL DEFAULT 0,
|
||||
total_audit_count bigint NOT NULL DEFAULT 0,
|
||||
vetted_at timestamp with time zone,
|
||||
uptime_success_count bigint NOT NULL DEFAULT 0,
|
||||
total_uptime_count bigint NOT NULL DEFAULT 0,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||
contained boolean NOT NULL DEFAULT false,
|
||||
disqualified timestamp with time zone,
|
||||
suspended timestamp with time zone,
|
||||
unknown_audit_suspended timestamp with time zone,
|
||||
offline_suspended timestamp with time zone,
|
||||
under_review timestamp with time zone,
|
||||
online_score double precision NOT NULL DEFAULT 1,
|
||||
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
exit_initiated_at timestamp with time zone,
|
||||
exit_loop_completed_at timestamp with time zone,
|
||||
exit_finished_at timestamp with time zone,
|
||||
exit_success boolean NOT NULL DEFAULT false,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE node_api_versions (
|
||||
id bytea NOT NULL,
|
||||
api_version integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE offers (
|
||||
id serial NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
award_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
award_credit_duration_days integer,
|
||||
invitee_credit_duration_days integer,
|
||||
redeemable_cap integer,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
status integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE peer_identities (
|
||||
node_id bytea NOT NULL,
|
||||
leaf_serial_number bytea NOT NULL,
|
||||
chain bytea NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_audits (
|
||||
node_id bytea NOT NULL,
|
||||
piece_id bytea NOT NULL,
|
||||
stripe_index bigint NOT NULL,
|
||||
share_size bigint NOT NULL,
|
||||
expected_share_hash bytea NOT NULL,
|
||||
reverify_count bigint NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_serial_queue (
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
usage_limit bigint,
|
||||
bandwidth_limit bigint,
|
||||
rate_limit integer,
|
||||
max_buckets integer,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE project_bandwidth_rollups (
|
||||
project_id bytea NOT NULL,
|
||||
interval_month date NOT NULL,
|
||||
egress_allocated bigint NOT NULL,
|
||||
PRIMARY KEY ( project_id, interval_month )
|
||||
);
|
||||
CREATE TABLE registration_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea,
|
||||
project_limit integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reported_serials (
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
observed_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE revocations (
|
||||
revoked bytea NOT NULL,
|
||||
api_key_id bytea NOT NULL,
|
||||
PRIMARY KEY ( revoked )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_payments (
|
||||
id bigserial NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
period text NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
receipt text,
|
||||
notes text,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_paystubs (
|
||||
period text NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
codes text NOT NULL,
|
||||
usage_at_rest double precision NOT NULL,
|
||||
usage_get bigint NOT NULL,
|
||||
usage_put bigint NOT NULL,
|
||||
usage_get_repair bigint NOT NULL,
|
||||
usage_put_repair bigint NOT NULL,
|
||||
usage_get_audit bigint NOT NULL,
|
||||
comp_at_rest bigint NOT NULL,
|
||||
comp_get bigint NOT NULL,
|
||||
comp_put bigint NOT NULL,
|
||||
comp_get_repair bigint NOT NULL,
|
||||
comp_put_repair bigint NOT NULL,
|
||||
comp_get_audit bigint NOT NULL,
|
||||
surge_percent bigint NOT NULL,
|
||||
held bigint NOT NULL,
|
||||
owed bigint NOT NULL,
|
||||
disposed bigint NOT NULL,
|
||||
paid bigint NOT NULL,
|
||||
PRIMARY KEY ( period, node_id )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
node_id bytea NOT NULL,
|
||||
interval_end_time timestamp with time zone NOT NULL,
|
||||
data_total double precision NOT NULL,
|
||||
PRIMARY KEY ( interval_end_time, node_id )
|
||||
);
|
||||
CREATE TABLE stripe_customers (
|
||||
user_id bytea NOT NULL,
|
||||
customer_id text NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( user_id ),
|
||||
UNIQUE ( customer_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_invoice_project_records (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
storage double precision NOT NULL,
|
||||
egress bigint NOT NULL,
|
||||
objects bigint NOT NULL,
|
||||
period_start timestamp with time zone NOT NULL,
|
||||
period_end timestamp with time zone NOT NULL,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( project_id, period_start, period_end )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
||||
tx_id text NOT NULL,
|
||||
rate bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE users (
|
||||
id bytea NOT NULL,
|
||||
email text NOT NULL,
|
||||
normalized_email text NOT NULL,
|
||||
full_name text NOT NULL,
|
||||
short_name text,
|
||||
password_hash bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
project_limit integer NOT NULL DEFAULT 0,
|
||||
position text,
|
||||
company_name text,
|
||||
company_size integer,
|
||||
working_on text,
|
||||
is_professional boolean NOT NULL DEFAULT false,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE value_attributions (
|
||||
project_id bytea NOT NULL,
|
||||
bucket_name bytea NOT NULL,
|
||||
partner_id bytea NOT NULL,
|
||||
last_updated timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( project_id, bucket_name )
|
||||
);
|
||||
CREATE TABLE api_keys (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
head bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
secret bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( head ),
|
||||
UNIQUE ( name, project_id )
|
||||
);
|
||||
CREATE TABLE bucket_metainfos (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ),
|
||||
name bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
path_cipher integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
default_segment_size integer NOT NULL,
|
||||
default_encryption_cipher_suite integer NOT NULL,
|
||||
default_encryption_block_size integer NOT NULL,
|
||||
default_redundancy_algorithm integer NOT NULL,
|
||||
default_redundancy_share_size integer NOT NULL,
|
||||
default_redundancy_required_shares integer NOT NULL,
|
||||
default_redundancy_repair_shares integer NOT NULL,
|
||||
default_redundancy_optimal_shares integer NOT NULL,
|
||||
default_redundancy_total_shares integer NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( name, project_id ),
|
||||
UNIQUE ( project_id, name )
|
||||
);
|
||||
CREATE TABLE project_members (
|
||||
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( member_id, project_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE TABLE user_credits (
|
||||
id serial NOT NULL,
|
||||
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
offer_id integer NOT NULL REFERENCES offers( id ),
|
||||
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
||||
type text NOT NULL,
|
||||
credits_earned_in_cents integer NOT NULL,
|
||||
credits_used_in_cents integer NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( id, offer_id )
|
||||
);
|
||||
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
||||
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
|
||||
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
|
||||
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
|
||||
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
|
||||
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
|
||||
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
||||
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
||||
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
||||
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
|
||||
|
||||
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
|
||||
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
|
||||
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
|
||||
|
||||
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
|
||||
|
||||
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
|
||||
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
||||
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
|
||||
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
|
||||
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
|
||||
|
||||
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
-- NEW DATA --
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);
576 satellite/satellitedb/testdata/postgres.v141.sql vendored Normal file
@ -0,0 +1,576 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
    node_id bytea NOT NULL,
    start_time timestamp with time zone NOT NULL,
    put_total bigint NOT NULL,
    get_total bigint NOT NULL,
    get_audit_total bigint NOT NULL,
    get_repair_total bigint NOT NULL,
    put_repair_total bigint NOT NULL,
    at_rest_total double precision NOT NULL,
    PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
    name text NOT NULL,
    value timestamp with time zone NOT NULL,
    PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
    node_id bytea NOT NULL,
    history bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    inline bigint NOT NULL,
    remote bigint NOT NULL,
    remote_segments_count integer NOT NULL,
    inline_segments_count integer NOT NULL,
    object_count integer NOT NULL,
    metadata_size bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
    id text NOT NULL,
    user_id bytea NOT NULL,
    address text NOT NULL,
    amount bytea NOT NULL,
    received bytea NOT NULL,
    status integer NOT NULL,
    key text NOT NULL,
    timeout integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
    storage_node_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
    id bytea NOT NULL,
    user_id bytea NOT NULL,
    amount bigint NOT NULL,
    description text NOT NULL,
    type integer NOT NULL,
    status integer NOT NULL,
    duration bigint NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
    coupon_id bytea NOT NULL,
    amount bigint NOT NULL,
    status integer NOT NULL,
    period timestamp with time zone NOT NULL,
    PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
    node_id bytea NOT NULL,
    bytes_transferred bigint NOT NULL,
    pieces_transferred bigint NOT NULL DEFAULT 0,
    pieces_failed bigint NOT NULL DEFAULT 0,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
    node_id bytea NOT NULL,
    path bytea NOT NULL,
    piece_num integer NOT NULL,
    root_piece_id bytea,
    durability_ratio double precision NOT NULL,
    queued_at timestamp with time zone NOT NULL,
    requested_at timestamp with time zone,
    last_failed_at timestamp with time zone,
    last_failed_code integer,
    failed_count integer,
    finished_at timestamp with time zone,
    order_limit_send_count integer NOT NULL DEFAULT 0,
    PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
    path bytea NOT NULL,
    data bytea NOT NULL,
    attempted timestamp with time zone,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    segment_health double precision NOT NULL DEFAULT 1,
    PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
    segmentpath bytea NOT NULL,
    segmentdetail bytea NOT NULL,
    pieces_lost_count bigint NOT NULL,
    seg_damaged_unix_sec bigint NOT NULL,
    repair_attempt_count bigint NOT NULL,
    PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
    id bytea NOT NULL,
    address text NOT NULL DEFAULT '',
    last_net text NOT NULL,
    last_ip_port text,
    protocol integer NOT NULL DEFAULT 0,
    type integer NOT NULL DEFAULT 0,
    email text NOT NULL,
    wallet text NOT NULL,
    free_disk bigint NOT NULL DEFAULT -1,
    piece_count bigint NOT NULL DEFAULT 0,
    major bigint NOT NULL DEFAULT 0,
    minor bigint NOT NULL DEFAULT 0,
    patch bigint NOT NULL DEFAULT 0,
    hash text NOT NULL DEFAULT '',
    timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
    release boolean NOT NULL DEFAULT false,
    latency_90 bigint NOT NULL DEFAULT 0,
    audit_success_count bigint NOT NULL DEFAULT 0,
    total_audit_count bigint NOT NULL DEFAULT 0,
    vetted_at timestamp with time zone,
    uptime_success_count bigint NOT NULL DEFAULT 0,
    total_uptime_count bigint NOT NULL DEFAULT 0,
    created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
    last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
    contained boolean NOT NULL DEFAULT false,
    disqualified timestamp with time zone,
    suspended timestamp with time zone,
    unknown_audit_suspended timestamp with time zone,
    offline_suspended timestamp with time zone,
    under_review timestamp with time zone,
    online_score double precision NOT NULL DEFAULT 1,
    audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    audit_reputation_beta double precision NOT NULL DEFAULT 0,
    unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
    uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
    uptime_reputation_beta double precision NOT NULL DEFAULT 0,
    exit_initiated_at timestamp with time zone,
    exit_loop_completed_at timestamp with time zone,
    exit_finished_at timestamp with time zone,
    exit_success boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
    id bytea NOT NULL,
    api_version integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE offers (
    id serial NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    award_credit_in_cents integer NOT NULL DEFAULT 0,
    invitee_credit_in_cents integer NOT NULL DEFAULT 0,
    award_credit_duration_days integer,
    invitee_credit_duration_days integer,
    redeemable_cap integer,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    status integer NOT NULL,
    type integer NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
    node_id bytea NOT NULL,
    leaf_serial_number bytea NOT NULL,
    chain bytea NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
    node_id bytea NOT NULL,
    piece_id bytea NOT NULL,
    stripe_index bigint NOT NULL,
    share_size bigint NOT NULL,
    expected_share_hash bytea NOT NULL,
    reverify_count bigint NOT NULL,
    path bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
    storage_node_id bytea NOT NULL,
    bucket_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    action integer NOT NULL,
    settled bigint NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
    id bytea NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    usage_limit bigint,
    bandwidth_limit bigint,
    rate_limit integer,
    max_buckets integer,
    partner_id bytea,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
    project_id bytea NOT NULL,
    interval_month date NOT NULL,
    egress_allocated bigint NOT NULL,
    PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
    secret bytea NOT NULL,
    owner_id bytea,
    project_limit integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
    expires_at timestamp with time zone NOT NULL,
    storage_node_id bytea NOT NULL,
    bucket_id bytea NOT NULL,
    action integer NOT NULL,
    serial_number bytea NOT NULL,
    settled bigint NOT NULL,
    observed_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
    secret bytea NOT NULL,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE revocations (
    revoked bytea NOT NULL,
    api_key_id bytea NOT NULL,
    PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
    id serial NOT NULL,
    serial_number bytea NOT NULL,
    bucket_id bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
    id bigserial NOT NULL,
    created_at timestamp with time zone NOT NULL,
    node_id bytea NOT NULL,
    period text NOT NULL,
    amount bigint NOT NULL,
    receipt text,
    notes text,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
    period text NOT NULL,
    node_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    codes text NOT NULL,
    usage_at_rest double precision NOT NULL,
    usage_get bigint NOT NULL,
    usage_put bigint NOT NULL,
    usage_get_repair bigint NOT NULL,
    usage_put_repair bigint NOT NULL,
    usage_get_audit bigint NOT NULL,
    comp_at_rest bigint NOT NULL,
    comp_get bigint NOT NULL,
    comp_put bigint NOT NULL,
    comp_get_repair bigint NOT NULL,
    comp_put_repair bigint NOT NULL,
    comp_get_audit bigint NOT NULL,
    surge_percent bigint NOT NULL,
    held bigint NOT NULL,
    owed bigint NOT NULL,
    disposed bigint NOT NULL,
    paid bigint NOT NULL,
    PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
    node_id bytea NOT NULL,
    interval_end_time timestamp with time zone NOT NULL,
    data_total double precision NOT NULL,
    PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
    user_id bytea NOT NULL,
    customer_id text NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( user_id ),
    UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
    id bytea NOT NULL,
    project_id bytea NOT NULL,
    storage double precision NOT NULL,
    egress bigint NOT NULL,
    objects bigint NOT NULL,
    period_start timestamp with time zone NOT NULL,
    period_end timestamp with time zone NOT NULL,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
    tx_id text NOT NULL,
    rate bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
    id bytea NOT NULL,
    email text NOT NULL,
    normalized_email text NOT NULL,
    full_name text NOT NULL,
    short_name text,
    password_hash bytea NOT NULL,
    status integer NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    project_limit integer NOT NULL DEFAULT 0,
    position text,
    company_name text,
    company_size integer,
    working_on text,
    is_professional boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
    project_id bytea NOT NULL,
    bucket_name bytea NOT NULL,
    partner_id bytea NOT NULL,
    last_updated timestamp with time zone NOT NULL,
    PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    head bytea NOT NULL,
    name text NOT NULL,
    secret bytea NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( head ),
    UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ),
    name bytea NOT NULL,
    partner_id bytea,
    path_cipher integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    default_segment_size integer NOT NULL,
    default_encryption_cipher_suite integer NOT NULL,
    default_encryption_block_size integer NOT NULL,
    default_redundancy_algorithm integer NOT NULL,
    default_redundancy_share_size integer NOT NULL,
    default_redundancy_required_shares integer NOT NULL,
    default_redundancy_repair_shares integer NOT NULL,
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
    member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
    tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
    serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
    storage_node_id bytea NOT NULL,
    PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
    id serial NOT NULL,
    user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    offer_id integer NOT NULL REFERENCES offers( id ),
    referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
    type text NOT NULL,
    credits_earned_in_cents integer NOT NULL,
    credits_used_in_cents integer NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
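-- Editorial note (not part of the autogenerated file): the INSERTs below seed at
-- least one representative row per table defined above; the satellitedb migration
-- tests are presumably expected to load this snapshot and verify that schema and
-- data still match after migrating past v141.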
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
|
||||
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
|
||||
|
||||
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
|
||||
|
||||
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
|
||||
|
||||
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
|
||||
|
||||
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
|
||||
|
||||
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
|
||||
|
||||
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
|
||||
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
|
||||
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
||||
|
||||
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);
|
||||
|
||||
-- NEW DATA --
|
@ -47,49 +47,23 @@ func (e *Endpoint) GetPayStub(ctx context.Context, req *pb.GetHeldAmountRequest)

peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err)
}

node, err := e.overlay.Get(ctx, peer.ID)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}

period := req.Period.String()[0:7]
stub, err := e.service.GetPayStub(ctx, node.Id, period)
paystub, err := e.service.GetPaystub(ctx, node.Id, req.Period.Format("2006-01"))
if err != nil {
if ErrNoDataForPeriod.Has(err) {
return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err)
}
return nil, Error.Wrap(err)
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}

periodTime, err := date.PeriodToTime(stub.Period)
if err != nil {
return nil, err
}
return &pb.GetHeldAmountResponse{
Period: periodTime,
NodeId: stub.NodeID,
CreatedAt: stub.Created,
Codes: stub.Codes,
UsageAtRest: stub.UsageAtRest,
UsageGet: stub.UsageGet,
UsagePut: stub.UsagePut,
UsageGetRepair: stub.UsageGetRepair,
UsagePutRepair: stub.UsagePutRepair,
UsageGetAudit: stub.UsageGetAudit,
CompAtRest: stub.CompAtRest,
CompGet: stub.CompGet,
CompPut: stub.CompPut,
CompGetRepair: stub.CompGetRepair,
CompPutRepair: stub.CompPutRepair,
CompGetAudit: stub.CompGetAudit,
SurgePercent: stub.SurgePercent,
Held: stub.Held,
Owed: stub.Owed,
Disposed: stub.Disposed,
Paid: stub.Paid,
}, nil
return convertPaystub(paystub)
}

// GetAllPaystubs sends all paystubs for client node.

@ -98,65 +72,65 @@ func (e *Endpoint) GetAllPaystubs(ctx context.Context, req *pb.GetAllPaystubsReq

peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err)
}

node, err := e.overlay.Get(ctx, peer.ID)
if err != nil {
if overlay.ErrNodeNotFound.Has(err) {
return &pb.GetAllPaystubsResponse{}, nil
}

return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}

stubs, err := e.service.GetAllPaystubs(ctx, node.Id)
paystubs, err := e.service.GetAllPaystubs(ctx, node.Id)
if err != nil {
if ErrNoDataForPeriod.Has(err) {
return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err)
}
return nil, Error.Wrap(err)
}

var paystubs []*pb.GetHeldAmountResponse

response := pb.GetAllPaystubsResponse{
Paystub: paystubs,
}

for i := 0; i < len(stubs); i++ {
period, err := date.PeriodToTime(stubs[i].Period)
response := &pb.GetAllPaystubsResponse{}
for _, paystub := range paystubs {
pbPaystub, err := convertPaystub(paystub)
if err != nil {
return nil, Error.Wrap(err)
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}
response.Paystub = append(response.Paystub, pbPaystub)
}
return response, nil
}

heldAmountResponse := pb.GetHeldAmountResponse{
Period: period,
NodeId: stubs[i].NodeID,
CreatedAt: stubs[i].Created,
Codes: stubs[i].Codes,
UsageAtRest: stubs[i].UsageAtRest,
UsageGet: stubs[i].UsageGet,
UsagePut: stubs[i].UsagePut,
UsageGetRepair: stubs[i].UsageGetRepair,
UsagePutRepair: stubs[i].UsagePutRepair,
UsageGetAudit: stubs[i].UsageGetAudit,
CompAtRest: stubs[i].CompAtRest,
CompGet: stubs[i].CompGet,
CompPut: stubs[i].CompPut,
CompGetRepair: stubs[i].CompGetRepair,
CompPutRepair: stubs[i].CompPutRepair,
CompGetAudit: stubs[i].CompGetAudit,
SurgePercent: stubs[i].SurgePercent,
Held: stubs[i].Held,
Owed: stubs[i].Owed,
Disposed: stubs[i].Disposed,
Paid: stubs[i].Paid,
}

response.Paystub = append(response.Paystub, &heldAmountResponse)
func convertPaystub(paystub Paystub) (*pb.GetHeldAmountResponse, error) {
period, err := date.PeriodToTime(paystub.Period)
if err != nil {
return nil, rpcstatus.Wrap(rpcstatus.Internal, Error.Wrap(err))
}

return &response, nil
return &pb.GetHeldAmountResponse{
Period: period,
NodeId: paystub.NodeID,
CreatedAt: paystub.Created,
Codes: paystub.Codes,
UsageAtRest: paystub.UsageAtRest,
UsageGet: paystub.UsageGet,
UsagePut: paystub.UsagePut,
UsageGetRepair: paystub.UsageGetRepair,
UsagePutRepair: paystub.UsagePutRepair,
UsageGetAudit: paystub.UsageGetAudit,
CompAtRest: paystub.CompAtRest,
CompGet: paystub.CompGet,
CompPut: paystub.CompPut,
CompGetRepair: paystub.CompGetRepair,
CompPutRepair: paystub.CompPutRepair,
CompGetAudit: paystub.CompGetAudit,
SurgePercent: paystub.SurgePercent,
Held: paystub.Held,
Owed: paystub.Owed,
Disposed: paystub.Disposed,
Paid: paystub.Paid,
}, err
}

// GetPayment sends node payment data for client node.

@ -165,35 +139,23 @@ func (e *Endpoint) GetPayment(ctx context.Context, req *pb.GetPaymentRequest) (_

peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err)
}

node, err := e.overlay.Get(ctx, peer.ID)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}

payment, err := e.service.GetPayment(ctx, node.Id, req.Period.String())
if err != nil {
if ErrNoDataForPeriod.Has(err) {
return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err)
}
return nil, Error.Wrap(err)
}

timePeriod, err := date.PeriodToTime(payment.Period)
if err != nil {
return nil, Error.Wrap(err)
}

return &pb.GetPaymentResponse{
NodeId: payment.NodeID,
CreatedAt: payment.Created,
Period: timePeriod,
Amount: payment.Amount,
Receipt: payment.Receipt,
Notes: payment.Notes,
Id: payment.ID,
}, nil
return convertPayment(payment)
}

// GetAllPayments sends all payments to node.

@ -202,49 +164,49 @@ func (e *Endpoint) GetAllPayments(ctx context.Context, req *pb.GetAllPaymentsReq

peer, err := identity.PeerIdentityFromContext(ctx)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err)
}

node, err := e.overlay.Get(ctx, peer.ID)
if err != nil {
if overlay.ErrNodeNotFound.Has(err) {
return &pb.GetAllPaymentsResponse{}, nil
}

return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}

allPayments, err := e.service.GetAllPayments(ctx, node.Id)
payments, err := e.service.GetAllPayments(ctx, node.Id)
if err != nil {
if ErrNoDataForPeriod.Has(err) {
return nil, rpcstatus.Error(rpcstatus.OutOfRange, err.Error())
return nil, rpcstatus.Wrap(rpcstatus.OutOfRange, err)
}
return nil, Error.Wrap(err)
}

var payments []*pb.GetPaymentResponse

response := pb.GetAllPaymentsResponse{
Payment: payments,
}

for i := 0; i < len(allPayments); i++ {
period, err := date.PeriodToTime(allPayments[i].Period)
response := &pb.GetAllPaymentsResponse{}
for _, payment := range payments {
pbPayment, err := convertPayment(payment)
if err != nil {
return nil, Error.Wrap(err)
return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
}
response.Payment = append(response.Payment, pbPayment)
}
return response, nil
}

paymentResponse := pb.GetPaymentResponse{
NodeId: allPayments[i].NodeID,
CreatedAt: allPayments[i].Created,
Period: period,
Amount: allPayments[i].Amount,
Receipt: allPayments[i].Receipt,
Notes: allPayments[i].Notes,
Id: allPayments[i].ID,
}

response.Payment = append(response.Payment, &paymentResponse)
func convertPayment(payment Payment) (*pb.GetPaymentResponse, error) {
period, err := date.PeriodToTime(payment.Period)
if err != nil {
return nil, rpcstatus.Wrap(rpcstatus.Internal, Error.Wrap(err))
}

return &response, nil
return &pb.GetPaymentResponse{
Id: payment.ID,
CreatedAt: payment.Created,
NodeId: payment.NodeID,
Period: period,
Amount: payment.Amount,
Receipt: payment.Receipt,
Notes: payment.Notes,
}, nil
}
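
As a side note on the shape this refactor converges on: the single-item and list endpoints now share one conversion path, so a malformed period fails identically everywhere. A minimal standalone sketch of that pattern (hypothetical Item/Proto types, not code from this change):

package main

import (
	"errors"
	"fmt"
)

// Item and Proto stand in for the storage-layer and wire-layer types;
// convert plays the role of convertPaystub/convertPayment above.
type Item struct{ Period string }
type Proto struct{ Period string }

func convert(it Item) (*Proto, error) {
	if len(it.Period) != len("2006-01") {
		return nil, errors.New("invalid period: " + it.Period)
	}
	return &Proto{Period: it.Period}, nil
}

func main() {
	items := []Item{{"2020-01"}, {"2020-02"}}
	out := make([]*Proto, 0, len(items))
	for _, it := range items {
		p, err := convert(it) // one shared failure path for single and bulk callers
		if err != nil {
			fmt.Println("convert failed:", err)
			return
		}
		out = append(out, p)
	}
	fmt.Println("converted", len(out), "items")
}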
@ -18,17 +18,19 @@ import (

// architecture: Service
type DB interface {
// GetPaystub return payStub by nodeID and period.
GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (PayStub, error)
GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (Paystub, error)
// GetAllPaystubs return all payStubs by nodeID.
GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]PayStub, error)
// CreatePaystub insert paystub into db.
CreatePaystub(ctx context.Context, stub PayStub) (err error)
GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]Paystub, error)

// GetPayment return storagenode payment by nodeID and period.
GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (StoragenodePayment, error)
// CreatePayment insert payment into db.
CreatePayment(ctx context.Context, payment StoragenodePayment) (err error)
GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (Payment, error)
// GetAllPayments return all payments by nodeID.
GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]StoragenodePayment, error)
GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]Payment, error)

// TestCreatePaystub insert paystub into db. Only used for tests.
TestCreatePaystub(ctx context.Context, stub Paystub) (err error)
// TestCreatePayment insert payment into db. Only used for tests.
TestCreatePayment(ctx context.Context, payment Payment) (err error)
}

// ErrNoDataForPeriod represents errors from the payouts database.

@ -37,8 +39,8 @@ var ErrNoDataForPeriod = errs.Class("no payStub/payments for period error")

// Error is the default error class for payouts package.
var Error = errs.Class("payouts db error")

// PayStub is an entity that holds held amount of cash that will be paid to storagenode operator after some period.
type PayStub struct {
// Paystub is an entity that holds held amount of cash that will be paid to storagenode operator after some period.
type Paystub struct {
Period string `json:"period"`
NodeID storj.NodeID `json:"nodeId"`
Created time.Time `json:"created"`

@ -62,8 +64,8 @@ type PayStub struct {

Paid int64 `json:"paid"`
}

// StoragenodePayment is an entity that holds payment to storagenode operator parameters.
type StoragenodePayment struct {
// Payment is an entity that holds payment to storagenode operator parameters.
type Payment struct {
ID int64 `json:"id"`
Created time.Time `json:"created"`
NodeID storj.NodeID `json:"nodeId"`

@ -89,38 +91,38 @@ func NewService(log *zap.Logger, db DB) *Service {

}
}

// GetPayStub returns PayStub by nodeID and period.
func (service *Service) GetPayStub(ctx context.Context, nodeID storj.NodeID, period string) (PayStub, error) {
payStub, err := service.db.GetPaystub(ctx, nodeID, period)
// GetPaystub returns Paystub by nodeID and period.
func (service *Service) GetPaystub(ctx context.Context, nodeID storj.NodeID, period string) (Paystub, error) {
paystub, err := service.db.GetPaystub(ctx, nodeID, period)
if err != nil {
return PayStub{}, Error.Wrap(err)
return Paystub{}, Error.Wrap(err)
}

return payStub, nil
return paystub, nil
}

// GetAllPaystubs returns all paystubs by nodeID.
func (service *Service) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]PayStub, error) {
payStubs, err := service.db.GetAllPaystubs(ctx, nodeID)
func (service *Service) GetAllPaystubs(ctx context.Context, nodeID storj.NodeID) ([]Paystub, error) {
paystubs, err := service.db.GetAllPaystubs(ctx, nodeID)
if err != nil {
return []PayStub{}, Error.Wrap(err)
return []Paystub{}, Error.Wrap(err)
}

return payStubs, nil
return paystubs, nil
}

// GetPayment returns storagenode payment data by nodeID and period.
func (service *Service) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (StoragenodePayment, error) {
func (service *Service) GetPayment(ctx context.Context, nodeID storj.NodeID, period string) (Payment, error) {
payment, err := service.db.GetPayment(ctx, nodeID, period)
if err != nil {
return StoragenodePayment{}, Error.Wrap(err)
return Payment{}, Error.Wrap(err)
}

return payment, nil
}

// GetAllPayments returns all payments by nodeID.
func (service *Service) GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]StoragenodePayment, error) {
func (service *Service) GetAllPayments(ctx context.Context, nodeID storj.NodeID) ([]Payment, error) {
payments, err := service.db.GetAllPayments(ctx, nodeID)
if err != nil {
return nil, Error.Wrap(err)

@ -7,7 +7,7 @@ import (

"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"storj.io/common/storj"
"storj.io/common/testcontext"

@ -19,13 +19,12 @@ import (

func TestPayoutDB(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
snoPayout := db.SnoPayout()
snoPayoutDB := db.SNOPayouts()
NodeID := storj.NodeID{}
period := "2020-01"
paystub := snopayouts.PayStub{

paystub := snopayouts.Paystub{
Period: "2020-01",
NodeID: NodeID,
Created: time.Now().UTC(),
Codes: "1",
UsageAtRest: 1,
UsageGet: 2,

@ -46,10 +45,9 @@ func TestPayoutDB(t *testing.T) {

Paid: 17,
}

paystub2 := snopayouts.PayStub{
paystub2 := snopayouts.Paystub{
Period: "2020-02",
NodeID: NodeID,
Created: time.Now().UTC(),
Codes: "2",
UsageAtRest: 4,
UsageGet: 5,

@ -70,10 +68,9 @@ func TestPayoutDB(t *testing.T) {

Paid: 20,
}

paystub3 := snopayouts.PayStub{
paystub3 := snopayouts.Paystub{
Period: "2020-03",
NodeID: NodeID,
Created: time.Now().UTC(),
Codes: "33",
UsageAtRest: 10,
UsageGet: 11,

@ -94,122 +91,44 @@ func TestPayoutDB(t *testing.T) {

Paid: 26,
}

t.Run("Test StorePayStub", func(t *testing.T) {
err := snoPayout.CreatePaystub(ctx, paystub)
assert.NoError(t, err)
err = snoPayout.CreatePaystub(ctx, paystub2)
assert.NoError(t, err)
err = snoPayout.CreatePaystub(ctx, paystub3)
assert.NoError(t, err)
})
{
err := snoPayoutDB.TestCreatePaystub(ctx, paystub)
require.NoError(t, err)

t.Run("Test GetPayStub", func(t *testing.T) {
stub, err := snoPayout.GetPaystub(ctx, NodeID, period)
assert.NoError(t, err)
assert.Equal(t, stub.Period, paystub.Period)
assert.Equal(t, stub.Codes, paystub.Codes)
assert.Equal(t, stub.CompAtRest, paystub.CompAtRest)
assert.Equal(t, stub.CompGet, paystub.CompGet)
assert.Equal(t, stub.CompGetAudit, paystub.CompGetAudit)
assert.Equal(t, stub.CompGetRepair, paystub.CompGetRepair)
assert.Equal(t, stub.CompPut, paystub.CompPut)
assert.Equal(t, stub.CompPutRepair, paystub.CompPutRepair)
assert.Equal(t, stub.Disposed, paystub.Disposed)
assert.Equal(t, stub.Held, paystub.Held)
assert.Equal(t, stub.Owed, paystub.Owed)
assert.Equal(t, stub.Paid, paystub.Paid)
assert.Equal(t, stub.NodeID, paystub.NodeID)
assert.Equal(t, stub.SurgePercent, paystub.SurgePercent)
assert.Equal(t, stub.UsageAtRest, paystub.UsageAtRest)
assert.Equal(t, stub.UsageGet, paystub.UsageGet)
assert.Equal(t, stub.UsageGetAudit, paystub.UsageGetAudit)
assert.Equal(t, stub.UsageGetRepair, paystub.UsageGetRepair)
assert.Equal(t, stub.UsagePut, paystub.UsagePut)
assert.Equal(t, stub.UsagePutRepair, paystub.UsagePutRepair)
err = snoPayoutDB.TestCreatePaystub(ctx, paystub2)
require.NoError(t, err)

stub, err = snoPayout.GetPaystub(ctx, NodeID, "")
assert.Error(t, err)
err = snoPayoutDB.TestCreatePaystub(ctx, paystub3)
require.NoError(t, err)
}

stub, err = snoPayout.GetPaystub(ctx, storj.NodeID{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, period)
assert.Error(t, err)
})
{
actual, err := snoPayoutDB.GetPaystub(ctx, NodeID, "2020-01")
require.NoError(t, err)
actual.Created = time.Time{} // created is chosen by the database layer
require.Equal(t, paystub, actual)

t.Run("Test GetAllPaystubs", func(t *testing.T) {
stubs, err := snoPayout.GetAllPaystubs(ctx, NodeID)
assert.NoError(t, err)
for i := 0; i < len(stubs); i++ {
if stubs[i].Period == "2020-01" {
assert.Equal(t, stubs[i].Period, paystub.Period)
assert.Equal(t, stubs[i].Codes, paystub.Codes)
assert.Equal(t, stubs[i].CompAtRest, paystub.CompAtRest)
assert.Equal(t, stubs[i].CompGet, paystub.CompGet)
assert.Equal(t, stubs[i].CompGetAudit, paystub.CompGetAudit)
assert.Equal(t, stubs[i].CompGetRepair, paystub.CompGetRepair)
assert.Equal(t, stubs[i].CompPut, paystub.CompPut)
assert.Equal(t, stubs[i].CompPutRepair, paystub.CompPutRepair)
assert.Equal(t, stubs[i].Disposed, paystub.Disposed)
assert.Equal(t, stubs[i].Held, paystub.Held)
assert.Equal(t, stubs[i].Owed, paystub.Owed)
assert.Equal(t, stubs[i].Paid, paystub.Paid)
assert.Equal(t, stubs[i].NodeID, paystub.NodeID)
assert.Equal(t, stubs[i].SurgePercent, paystub.SurgePercent)
assert.Equal(t, stubs[i].UsageAtRest, paystub.UsageAtRest)
assert.Equal(t, stubs[i].UsageGet, paystub.UsageGet)
assert.Equal(t, stubs[i].UsageGetAudit, paystub.UsageGetAudit)
assert.Equal(t, stubs[i].UsageGetRepair, paystub.UsageGetRepair)
assert.Equal(t, stubs[i].UsagePut, paystub.UsagePut)
assert.Equal(t, stubs[i].UsagePutRepair, paystub.UsagePutRepair)
}
if stubs[i].Period == "2020-02" {
assert.Equal(t, stubs[i].Period, paystub2.Period)
assert.Equal(t, stubs[i].Codes, paystub2.Codes)
assert.Equal(t, stubs[i].CompAtRest, paystub2.CompAtRest)
assert.Equal(t, stubs[i].CompGet, paystub2.CompGet)
assert.Equal(t, stubs[i].CompGetAudit, paystub2.CompGetAudit)
assert.Equal(t, stubs[i].CompGetRepair, paystub2.CompGetRepair)
assert.Equal(t, stubs[i].CompPut, paystub2.CompPut)
assert.Equal(t, stubs[i].CompPutRepair, paystub2.CompPutRepair)
assert.Equal(t, stubs[i].Disposed, paystub2.Disposed)
assert.Equal(t, stubs[i].Held, paystub2.Held)
assert.Equal(t, stubs[i].Owed, paystub2.Owed)
assert.Equal(t, stubs[i].Paid, paystub2.Paid)
assert.Equal(t, stubs[i].NodeID, paystub2.NodeID)
assert.Equal(t, stubs[i].SurgePercent, paystub2.SurgePercent)
assert.Equal(t, stubs[i].UsageAtRest, paystub2.UsageAtRest)
assert.Equal(t, stubs[i].UsageGet, paystub2.UsageGet)
assert.Equal(t, stubs[i].UsageGetAudit, paystub2.UsageGetAudit)
assert.Equal(t, stubs[i].UsageGetRepair, paystub2.UsageGetRepair)
assert.Equal(t, stubs[i].UsagePut, paystub2.UsagePut)
assert.Equal(t, stubs[i].UsagePutRepair, paystub2.UsagePutRepair)
}
if stubs[i].Period == "2020-03" {
assert.Equal(t, stubs[i].Period, paystub3.Period)
assert.Equal(t, stubs[i].Codes, paystub3.Codes)
assert.Equal(t, stubs[i].CompAtRest, paystub3.CompAtRest)
assert.Equal(t, stubs[i].CompGet, paystub3.CompGet)
assert.Equal(t, stubs[i].CompGetAudit, paystub3.CompGetAudit)
assert.Equal(t, stubs[i].CompGetRepair, paystub3.CompGetRepair)
assert.Equal(t, stubs[i].CompPut, paystub3.CompPut)
assert.Equal(t, stubs[i].CompPutRepair, paystub3.CompPutRepair)
assert.Equal(t, stubs[i].Disposed, paystub3.Disposed)
assert.Equal(t, stubs[i].Held, paystub3.Held)
assert.Equal(t, stubs[i].Owed, paystub3.Owed)
assert.Equal(t, stubs[i].Paid, paystub3.Paid)
assert.Equal(t, stubs[i].NodeID, paystub3.NodeID)
assert.Equal(t, stubs[i].SurgePercent, paystub3.SurgePercent)
assert.Equal(t, stubs[i].UsageAtRest, paystub3.UsageAtRest)
assert.Equal(t, stubs[i].UsageGet, paystub3.UsageGet)
assert.Equal(t, stubs[i].UsageGetAudit, paystub3.UsageGetAudit)
assert.Equal(t, stubs[i].UsageGetRepair, paystub3.UsageGetRepair)
assert.Equal(t, stubs[i].UsagePut, paystub3.UsagePut)
assert.Equal(t, stubs[i].UsagePutRepair, paystub3.UsagePutRepair)
}
_, err = snoPayoutDB.GetPaystub(ctx, NodeID, "")
require.Error(t, err)

_, err = snoPayoutDB.GetPaystub(ctx, testrand.NodeID(), "2020-01")
require.Error(t, err)
}

{
stubs, err := snoPayoutDB.GetAllPaystubs(ctx, NodeID)
require.NoError(t, err)
for _, actual := range stubs {
actual.Created = time.Time{} // created is chosen by the database layer
require.Equal(t, actual, map[string]snopayouts.Paystub{
"2020-01": paystub,
"2020-02": paystub2,
"2020-03": paystub3,
}[actual.Period])
}
})
}

payment := snopayouts.StoragenodePayment{
ID: 1,
Created: time.Now().UTC(),
payment := snopayouts.Payment{
NodeID: NodeID,
Period: "2020-01",
Amount: 123,

@ -217,25 +136,23 @@ func TestPayoutDB(t *testing.T) {

Notes: "notes",
}

t.Run("Test StorePayment", func(t *testing.T) {
err := snoPayout.CreatePayment(ctx, payment)
assert.NoError(t, err)
})
{
err := snoPayoutDB.TestCreatePayment(ctx, payment)
require.NoError(t, err)
}

t.Run("Test GetPayment", func(t *testing.T) {
paym, err := snoPayout.GetPayment(ctx, NodeID, period)
assert.NoError(t, err)
assert.Equal(t, paym.NodeID, payment.NodeID)
assert.Equal(t, paym.Period, payment.Period)
assert.Equal(t, paym.Amount, payment.Amount)
assert.Equal(t, paym.Notes, payment.Notes)
assert.Equal(t, paym.Receipt, payment.Receipt)
{
actual, err := snoPayoutDB.GetPayment(ctx, NodeID, "2020-01")
require.NoError(t, err)
actual.Created = time.Time{} // created is chosen by the database layer
actual.ID = 0 // id is chosen by the database layer
require.Equal(t, payment, actual)

paym, err = snoPayout.GetPayment(ctx, NodeID, "")
assert.Error(t, err)
_, err = snoPayoutDB.GetPayment(ctx, NodeID, "")
require.Error(t, err)

paym, err = snoPayout.GetPayment(ctx, testrand.NodeID(), period)
assert.Error(t, err)
})
_, err = snoPayoutDB.GetPayment(ctx, testrand.NodeID(), "2020-01")
require.Error(t, err)
}
})
}

8
scripts/test-sim-redis-up-and-down-dev.sh
Executable file
@ -0,0 +1,8 @@
#!/usr/bin/env bash

SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

# shellcheck source=/postgres-dev.sh
source "${SCRIPTDIR}/postgres-dev.sh"

"${SCRIPTDIR}/test-sim-redis-up-and-down.sh"

72
scripts/test-sim-redis-up-and-down.sh
Executable file
@ -0,0 +1,72 @@
#!/usr/bin/env bash
set -Eeuo pipefail
set +x

# Required environment variables
if [ -z "${STORJ_SIM_POSTGRES}" ]; then
echo "STORJ_SIM_POSTGRES environment variable must be set to a non-empty string"
exit 1
fi

# constants
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
readonly SCRIPT_DIR
REDIS_CONTAINER_NAME=storj_sim_redis
readonly REDIS_CONTAINER_NAME
TMP_DIR=$(mktemp -d -t tmp.XXXXXXXXXX)
readonly TMP_DIR

# setup tmpdir for testfiles and cleanup
cleanup() {
trap - EXIT

rm -rf "${TMP_DIR}"
docker container rm -f "${REDIS_CONTAINER_NAME}" >/dev/null 2>&1 || true
}
trap cleanup EXIT

echo "install sim"
make -C "$SCRIPT_DIR"/.. install-sim

echo "overriding default max segment size to 6MiB"
GOBIN="${TMP_DIR}" go install -v -ldflags "-X 'storj.io/uplink.maxSegmentSize=6MiB'" storj.io/storj/cmd/uplink

# use modified version of uplink
export PATH="${TMP_DIR}:${PATH}"
export STORJ_NETWORK_DIR="${TMP_DIR}"

STORJ_NETWORK_HOST4=${STORJ_NETWORK_HOST4:-127.0.0.1}

redis_run() {
local retries=10

docker container run -d -p 6379:6379 --name "${REDIS_CONTAINER_NAME}" redis:5.0-alpine
until docker container exec "${REDIS_CONTAINER_NAME}" redis-cli ping >/dev/null 2>&1 ||
[ ${retries} -eq 0 ]; do
echo "waiting for Redis server to be ready, $((retries--)) remaining attempts..."
sleep 1
done

if [ ${retries} -eq 0 ]; then
echo "aborting, Redis server is not ready after several retries"
exit 1
fi
}

redis_stop() {
docker container stop "${REDIS_CONTAINER_NAME}"
}

# setup the network
storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \
--postgres="${STORJ_SIM_POSTGRES}" --redis="127.0.0.1:6379" setup

# run test that checks that the satellite runs when Redis is up and down
redis_run
storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \
--redis="127.0.0.1:6379" test bash "${SCRIPT_DIR}/test-uplink-redis-up-and-down.sh" "${REDIS_CONTAINER_NAME}"

# run test that checks that the satellite runs despite not being able to connect to Redis
redis_stop
storj-sim --failfast -x --satellites 1 --host "${STORJ_NETWORK_HOST4}" network \
--redis="127.0.0.1:6379" test bash "${SCRIPT_DIR}/test-uplink.sh"
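
The readiness loop in redis_run (poll `redis-cli ping` with a bounded retry budget) is the part of this script most worth reusing elsewhere. A rough Go equivalent of the same idea, with the docker command spelled out as a hypothetical stand-in, might look like:

package main

import (
	"errors"
	"fmt"
	"os/exec"
	"time"
)

// ping stands in for `docker container exec storj_sim_redis redis-cli ping`.
func ping() error {
	return exec.Command("docker", "container", "exec", "storj_sim_redis", "redis-cli", "ping").Run()
}

// waitReady retries ping up to retries times, sleeping between attempts,
// mirroring the bounded until-loop in redis_run above.
func waitReady(retries int, delay time.Duration) error {
	for ; retries > 0; retries-- {
		if err := ping(); err == nil {
			return nil
		}
		fmt.Printf("waiting for Redis server to be ready, %d remaining attempts...\n", retries-1)
		time.Sleep(delay)
	}
	return errors.New("redis server is not ready after several retries")
}

func main() {
	if err := waitReady(10, time.Second); err != nil {
		fmt.Println(err)
	}
}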
150
scripts/test-uplink-redis-up-and-down.sh
Executable file
@ -0,0 +1,150 @@
#!/usr/bin/env bash
set -ueo pipefail

redis_container_name="${1-}"

# Required positional arguments
if [ -z "${redis_container_name}" ]; then
echo "redis container name is required as a first positional script argument"
exit 1
fi

# constants
BUCKET="bucket-123"
readonly BUCKET
UPLINK_DEBUG_ADDR=""
readonly UPLINK_DEBUG_ADDR

export STORJ_ACCESS="${GATEWAY_0_ACCESS}"
export STORJ_DEBUG_ADDR="${UPLINK_DEBUG_ADDR}"

# Vars
temp_dirs=() # used to track all the created temporary directories

cleanup() {
trap - EXIT

rm -rf "${temp_dirs[@]}"
echo "cleaned up test successfully"
}
trap cleanup EXIT

random_bytes_file() {
size="${1}"
output="${2}"
head -c "${size}" </dev/urandom >"${output}"
}

compare_files() {
name=$(basename "${2}")
if cmp "${1}" "${2}"; then
echo "${name} matches uploaded file"
else
echo "${name} does not match uploaded file"
exit 1
fi
}

redis_start() {
docker container start "${redis_container_name}"
}

redis_stop() {
docker container stop "${redis_container_name}"
}

uplink_test() {
local temp_dir
temp_dir=$(mktemp -d -t tmp.XXXXXXXXXX)
temp_dirs+=("${temp_dir}")

local src_dir="${temp_dir}/source"
local dst_dir="${temp_dir}/dst"
mkdir -p "${src_dir}" "${dst_dir}"

local uplink_dir="${temp_dir}/uplink"

random_bytes_file "2KiB" "${src_dir}/small-upload-testfile" # create 2KiB file of random bytes (inline)
random_bytes_file "5MiB" "${src_dir}/big-upload-testfile" # create 5MiB file of random bytes (remote)
# this is a special case where we need to test at least one remote segment and an inline segment of exact size 0
random_bytes_file "12MiB" "${src_dir}/multisegment-upload-testfile" # create 12MiB file of random bytes (1 remote segment + inline)
random_bytes_file "13MiB" "${src_dir}/diff-size-segments" # create 13MiB file of random bytes (2 remote segments)

random_bytes_file "100KiB" "${src_dir}/put-file" # create 100KiB file of random bytes (remote)

uplink mb "sj://$BUCKET/"
uplink cp "${src_dir}/small-upload-testfile" "sj://$BUCKET/" --progress=false
uplink cp "${src_dir}/big-upload-testfile" "sj://$BUCKET/" --progress=false
uplink cp "${src_dir}/multisegment-upload-testfile" "sj://$BUCKET/" --progress=false
uplink cp "${src_dir}/diff-size-segments" "sj://$BUCKET/" --progress=false

uplink <"${src_dir}/put-file" put "sj://$BUCKET/put-file"

uplink --config-dir "${uplink_dir}" import named-access "${STORJ_ACCESS}"

local files
files=$(STORJ_ACCESS='' uplink --config-dir "${uplink_dir}" --access named-access \
ls "sj://${BUCKET}" | tee "${temp_dir}/list" | wc -l)
local expected_files="5"
if [ "${files}" == "${expected_files}" ]; then
echo "listing returns ${files} files"
else
echo "listing returns ${files} files but want ${expected_files}"
exit 1
fi

local size_check
size_check=$(awk <"${temp_dir}/list" '{if($4 == "0") print "invalid size";}')
if [ "${size_check}" != "" ]; then
echo "listing returns invalid size for one of the objects:"
cat "${temp_dir}/list"
exit 1
fi

uplink ls "sj://$BUCKET/non-existing-prefix"

uplink cp "sj://$BUCKET/small-upload-testfile" "${dst_dir}" --progress=false
uplink cp "sj://$BUCKET/big-upload-testfile" "${dst_dir}" --progress=false
uplink cp "sj://$BUCKET/multisegment-upload-testfile" "${dst_dir}" --progress=false
uplink cp "sj://$BUCKET/diff-size-segments" "${dst_dir}" --progress=false
uplink cp "sj://$BUCKET/put-file" "${dst_dir}" --progress=false
uplink cat "sj://$BUCKET/put-file" >>"${dst_dir}/put-file-from-cat"

uplink rm "sj://$BUCKET/small-upload-testfile"
uplink rm "sj://$BUCKET/big-upload-testfile"
uplink rm "sj://$BUCKET/multisegment-upload-testfile"
uplink rm "sj://$BUCKET/diff-size-segments"
uplink rm "sj://$BUCKET/put-file"

uplink ls "sj://$BUCKET"

uplink rb "sj://$BUCKET"

compare_files "${src_dir}/small-upload-testfile" "${dst_dir}/small-upload-testfile"
compare_files "${src_dir}/big-upload-testfile" "${dst_dir}/big-upload-testfile"
compare_files "${src_dir}/multisegment-upload-testfile" "${dst_dir}/multisegment-upload-testfile"
compare_files "${src_dir}/diff-size-segments" "${dst_dir}/diff-size-segments"
compare_files "${src_dir}/put-file" "${dst_dir}/put-file"
compare_files "${src_dir}/put-file" "${dst_dir}/put-file-from-cat"

# test deleting non empty bucket with --force flag
uplink mb "sj://$BUCKET/"

for i in $(seq -w 1 16); do
uplink cp "${src_dir}/small-upload-testfile" "sj://$BUCKET/small-file-$i" --progress=false
done

uplink rb "sj://$BUCKET" --force

if [ "$(uplink ls | grep -c "No buckets")" = "0" ]; then
echo "uplink didn't remove the entire bucket with the 'force' flag"
exit 1
fi
}

# Run the test with Redis container running
uplink_test

# Run the test with Redis container not running
redis_stop
uplink_test

19
scripts/test-wasm-size.sh
Executable file
@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -ueo pipefail
set +x

cleanup(){
rm main.wasm
echo "cleaned up test successfully"
}
trap cleanup EXIT

cd satellite/console/wasm && pwd && GOOS=js GOARCH=wasm go build -o main.wasm .
BUILD_SIZE=$(stat -c %s main.wasm)
CURRENT_SIZE=10000000
if [ $BUILD_SIZE -gt $CURRENT_SIZE ]; then
echo "Wasm size is too big: the limit is $CURRENT_SIZE but the build is $BUILD_SIZE"
exit 1
fi

echo "Wasm size did not increase and it is $BUILD_SIZE (limit: $CURRENT_SIZE)"

2
scripts/testdata/satellite-config.yaml.lock
vendored
@ -38,7 +38,7 @@
# checker.reliability-cache-staleness: 5m0s

# comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override)
# checker.repair-overrides: 29/80/110-52,29/80/95-52
# checker.repair-overrides: 29/80/110-52,29/80/95-52,29/80/130-52

# percent of held amount disposed to node after leaving withheld
compensation.dispose-percent: 50

@ -6,6 +6,7 @@ package consoleapi

import (
"encoding/json"
"net/http"
"time"

"github.com/gorilla/mux"
"github.com/zeebo/errs"

@ -119,10 +120,12 @@ func (dashboard *StorageNode) EstimatedPayout(w http.ResponseWriter, r *http.Req

w.Header().Set(contentType, applicationJSON)

now := time.Now()

queryParams := r.URL.Query()
id := queryParams.Get("id")
if id == "" {
data, err := dashboard.service.GetAllSatellitesEstimatedPayout(ctx)
data, err := dashboard.service.GetAllSatellitesEstimatedPayout(ctx, now)
if err != nil {
dashboard.serveJSONError(w, http.StatusInternalServerError, ErrStorageNodeAPI.Wrap(err))
return

@ -139,7 +142,7 @@ func (dashboard *StorageNode) EstimatedPayout(w http.ResponseWriter, r *http.Req

return
}

data, err := dashboard.service.GetSatelliteEstimatedPayout(ctx, satelliteID)
data, err := dashboard.service.GetSatelliteEstimatedPayout(ctx, satelliteID, now)
if err != nil {
dashboard.serveJSONError(w, http.StatusInternalServerError, ErrStorageNodeAPI.Wrap(err))
return

@ -115,7 +115,7 @@ func TestStorageNodeApi(t *testing.T) {

body, err := ioutil.ReadAll(res.Body)
require.NoError(t, err)

estimation, err := sno.Console.Service.GetAllSatellitesEstimatedPayout(ctx)
estimation, err := sno.Console.Service.GetAllSatellitesEstimatedPayout(ctx, time.Now())
require.NoError(t, err)
expected, err := json.Marshal(estimatedpayouts.EstimatedPayout{
CurrentMonth: estimation.CurrentMonth,

@ -429,8 +429,8 @@ func (s *Service) GetAllSatellitesData(ctx context.Context) (_ *Satellites, err

}

// GetSatelliteEstimatedPayout returns estimated payouts for current and previous months for selected satellite.
func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) {
estimatedPayout, err = s.estimation.GetSatelliteEstimatedPayout(ctx, satelliteID)
func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID, now time.Time) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) {
estimatedPayout, err = s.estimation.GetSatelliteEstimatedPayout(ctx, satelliteID, now)
if err != nil {
return estimatedpayouts.EstimatedPayout{}, SNOServiceErr.Wrap(err)
}

@ -439,8 +439,8 @@ func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID s

}

// GetAllSatellitesEstimatedPayout returns estimated payouts for current and previous months for all satellites.
func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) {
estimatedPayout, err = s.estimation.GetAllSatellitesEstimatedPayout(ctx)
func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context, now time.Time) (estimatedPayout estimatedpayouts.EstimatedPayout, err error) {
estimatedPayout, err = s.estimation.GetAllSatellitesEstimatedPayout(ctx, now)
if err != nil {
return estimatedpayouts.EstimatedPayout{}, SNOServiceErr.Wrap(err)
}

@ -74,15 +74,10 @@ func (s *Service) GetReputationStats(ctx context.Context, satelliteID storj.Node

return nil, NodeStatsServiceErr.Wrap(err)
}

uptime := resp.GetUptimeCheck()
audit := resp.GetAuditCheck()

return &reputation.Stats{
SatelliteID: satelliteID,
Uptime: reputation.Metric{
TotalCount: uptime.GetTotalCount(),
SuccessCount: uptime.GetSuccessCount(),
},
Audit: reputation.Metric{
TotalCount: audit.GetTotalCount(),
SuccessCount: audit.GetSuccessCount(),

@ -31,12 +31,10 @@ const (

TypeCustom Type = 0
// TypeAuditCheckFailure is a notification type which describes node's audit check failure.
TypeAuditCheckFailure Type = 1
// TypeUptimeCheckFailure is a notification type which describes node's uptime check failure.
TypeUptimeCheckFailure Type = 2
// TypeDisqualification is a notification type which describes node's disqualification status.
TypeDisqualification Type = 3
TypeDisqualification Type = 2
// TypeSuspension is a notification type which describes node's suspension status.
TypeSuspension Type = 4
TypeSuspension Type = 3
)

// NewNotification holds notification entity info which is being received from satellite or local client.

@ -5,6 +5,9 @@ package estimatedpayouts

import (
"math"
"time"

"storj.io/storj/private/date"
)

// EstimatedPayout contains usage and estimated payouts data for current and previous months.

@ -61,3 +64,16 @@ func (pm *PayoutMonthly) SetPayout() {

func RoundFloat(value float64) float64 {
return math.Round(value*100) / 100
}

// SetExpectedMonth set current month expectations.
func (estimatedPayout *EstimatedPayout) SetExpectedMonth(now time.Time) {
daysPast := float64(now.Day()) - 1
if daysPast < 1 {
daysPast = 1
}

daysPerMonth := float64(date.UTCEndOfMonth(now).Day())
payoutPerDay := estimatedPayout.CurrentMonth.Payout / daysPast

estimatedPayout.CurrentMonthExpectations = payoutPerDay * daysPerMonth
}
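
To make the extrapolation concrete, here is a standalone worked example; the numbers match the test added later in this commit, and the snippet re-implements date.UTCEndOfMonth inline so it runs on its own:

package main

import (
	"fmt"
	"time"
)

// expected mirrors SetExpectedMonth above: extrapolate the payout accrued
// over the days already past to the full length of the month.
func expected(payout float64, now time.Time) float64 {
	daysPast := float64(now.Day()) - 1
	if daysPast < 1 {
		daysPast = 1
	}
	// day 0 of the next month normalizes to the last day of this month,
	// standing in for date.UTCEndOfMonth(now).Day()
	daysPerMonth := float64(time.Date(now.Year(), now.Month()+1, 0, 0, 0, 0, 0, time.UTC).Day())
	return (payout / daysPast) * daysPerMonth
}

func main() {
	feb1 := time.Date(2021, 2, 1, 16, 0, 0, 0, time.UTC)
	feb28 := time.Date(2021, 2, 28, 10, 0, 0, 0, time.UTC)
	fmt.Printf("%.2f\n", expected(100, feb1))  // 2800.00: one day of data extrapolated over 28
	fmt.Printf("%.2f\n", expected(100, feb28)) // 103.70: 27 days extrapolated over 28
}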
@ -0,0 +1,50 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package estimatedpayouts_test

import (
"math"
"testing"
"time"

"github.com/stretchr/testify/require"

"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode/payouts/estimatedpayouts"
)

func TestCurrentMonthExpectations(t *testing.T) {
testplanet.Run(t, testplanet.Config{
StorageNodeCount: 1,
SatelliteCount: 2,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
const payout = 100.0

type test struct {
time time.Time
expected float64
}
tests := []test{
// 28 days in month
{time.Date(2021, 2, 1, 16, 0, 0, 0, time.UTC), 2800.00},
{time.Date(2021, 2, 28, 10, 0, 0, 0, time.UTC), 103.70},
// 31 days in month
{time.Date(2021, 3, 1, 19, 0, 0, 0, time.UTC), 3100.0},
{time.Date(2021, 3, 31, 21, 0, 0, 0, time.UTC), 103.33},
}

for _, test := range tests {
estimates := estimatedpayouts.EstimatedPayout{
CurrentMonth: estimatedpayouts.PayoutMonthly{
Payout: payout,
},
}

estimates.SetExpectedMonth(test.time)
require.False(t, math.IsNaN(estimates.CurrentMonthExpectations))
require.InDelta(t, test.expected, estimates.CurrentMonthExpectations, 0.01)
}
})
}

@ -7,7 +7,6 @@ import (

"context"
"time"

"github.com/jinzhu/now"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"

@ -54,28 +53,40 @@ func NewService(bandwidthDB bandwidth.DB, reputationDB reputation.DB, storageUsa

}

// GetSatelliteEstimatedPayout returns estimated payouts for current and previous months from specific satellite with current level of load.
func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID) (payout EstimatedPayout, err error) {
func (s *Service) GetSatelliteEstimatedPayout(ctx context.Context, satelliteID storj.NodeID, now time.Time) (payout EstimatedPayout, err error) {
defer mon.Task()(&ctx)(&err)

currentMonthPayout, previousMonthPayout, err := s.estimatedPayout(ctx, satelliteID)
currentMonthPayout, previousMonthPayout, err := s.estimatedPayout(ctx, satelliteID, now)
if err != nil {
return EstimatedPayout{}, EstimationServiceErr.Wrap(err)
}

payout.CurrentMonth = currentMonthPayout
payout.PreviousMonth = previousMonthPayout
payout.setExpectations(ctx)

stats, err := s.reputationDB.Get(ctx, satelliteID)
if err != nil {
return EstimatedPayout{}, EstimationServiceErr.Wrap(err)
}

daysSinceJoined := stats.JoinedAt.Sub(now).Hours() / 24
if daysSinceJoined >= float64(now.Day()) {
payout.SetExpectedMonth(now)

return payout, nil
}

payout.CurrentMonthExpectations = (payout.CurrentMonth.Payout / daysSinceJoined) * float64(date.UTCEndOfMonth(now).Day())
return payout, nil
}

// GetAllSatellitesEstimatedPayout returns estimated payouts for current and previous months from all satellites with current level of load.
func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout EstimatedPayout, err error) {
func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context, now time.Time) (payout EstimatedPayout, err error) {
defer mon.Task()(&ctx)(&err)

satelliteIDs := s.trust.GetSatellites(ctx)
for i := 0; i < len(satelliteIDs); i++ {
current, previous, err := s.estimatedPayout(ctx, satelliteIDs[i])
current, previous, err := s.estimatedPayout(ctx, satelliteIDs[i], now)
if err != nil {
return EstimatedPayout{}, EstimationServiceErr.Wrap(err)
}

@ -97,20 +108,14 @@ func (s *Service) GetAllSatellitesEstimatedPayout(ctx context.Context) (payout E

payout.PreviousMonth.EgressRepairAudit += previous.EgressRepairAudit
payout.PreviousMonth.Held += previous.Held
}
payout.setExpectations(ctx)

payout.SetExpectedMonth(now)

return payout, nil
}

// setExpectations set current month expectations.
func (estimatedPayout *EstimatedPayout) setExpectations(ctx context.Context) {
daysPaste := float64(time.Now().Day() - 1)
DaysInMonth := float64(now.EndOfMonth().Day())
estimatedPayout.CurrentMonthExpectations = (estimatedPayout.CurrentMonth.Payout / daysPaste) * DaysInMonth
}

// estimatedPayout returns estimated payouts data for current and previous months from specific satellite.
func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID) (currentMonthPayout PayoutMonthly, previousMonthPayout PayoutMonthly, err error) {
func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID, now time.Time) (currentMonthPayout PayoutMonthly, previousMonthPayout PayoutMonthly, err error) {
defer mon.Task()(&ctx)(&err)

priceModel, err := s.pricingDB.Get(ctx, satelliteID)

@ -123,8 +128,8 @@ func (s *Service) estimatedPayout(ctx context.Context, satelliteID storj.NodeID)

return PayoutMonthly{}, PayoutMonthly{}, EstimationServiceErr.Wrap(err)
}

currentMonthPayout, err = s.estimationUsagePeriod(ctx, time.Now().UTC(), stats.JoinedAt, priceModel)
previousMonthPayout, err = s.estimationUsagePeriod(ctx, time.Now().UTC().AddDate(0, -1, 0), stats.JoinedAt, priceModel)
currentMonthPayout, err = s.estimationUsagePeriod(ctx, now.UTC(), stats.JoinedAt, priceModel)
previousMonthPayout, err = s.estimationUsagePeriod(ctx, now.UTC().AddDate(0, -1, 0), stats.JoinedAt, priceModel)

return currentMonthPayout, previousMonthPayout, nil
}
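
For the proration branch in GetSatelliteEstimatedPayout above: a node older than the current month-to-date gets the plain SetExpectedMonth extrapolation, while a younger node divides by its own age instead. A simplified worked sketch, assuming daysSinceJoined comes out as the positive age in days and ignoring the off-by-one day handling inside SetExpectedMonth:

package main

import "fmt"

// extrapolate divides the accrued payout by the smaller of "days elapsed
// this month" and the node's age in days, then scales to the whole month.
func extrapolate(payout, daysSinceJoined float64, dayOfMonth, daysInMonth int) float64 {
	days := float64(dayOfMonth)
	if daysSinceJoined < days {
		days = daysSinceJoined // node joined mid-month: prorate over its age
	}
	return payout / days * float64(daysInMonth)
}

func main() {
	// On the 20th of a 30-day month, a node that joined 10 days ago and
	// earned 50.0 gets a monthly expectation of 50/10*30 = 150.
	fmt.Println(extrapolate(50, 10, 20, 30))
	// An older node (joined 45 days ago) is extrapolated over the 20
	// elapsed days instead: 50/20*30 = 75.
	fmt.Println(extrapolate(50, 45, 20, 30))
}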
@ -286,6 +286,7 @@ type Peer struct {
|
||||
Storage *multinode.StorageEndpoint
|
||||
Bandwidth *multinode.BandwidthEndpoint
|
||||
Node *multinode.NodeEndpoint
|
||||
Payout *multinode.PayoutEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
@ -799,6 +800,11 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
|
||||
peer.DB.Reputation(),
|
||||
peer.Storage2.Trust)
|
||||
|
||||
peer.Multinode.Payout = multinode.NewPayoutEndpoint(
|
||||
peer.Log.Named("multinode:payout-endpoint"),
|
||||
apiKeys,
|
||||
peer.DB.Payout())
|
||||
|
||||
if err = multinodepb.DRPCRegisterStorage(peer.Server.DRPC(), peer.Multinode.Storage); err != nil {
|
||||
return nil, errs.Combine(err, peer.Close())
|
||||
}
|
||||
@ -808,6 +814,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
|
||||
if err = multinodepb.DRPCRegisterNode(peer.Server.DRPC(), peer.Multinode.Node); err != nil {
|
||||
return nil, errs.Combine(err, peer.Close())
|
||||
}
|
||||
if err = multinodepb.DRPCRegisterPayout(peer.Server.DRPC(), peer.Multinode.Payout); err != nil {
|
||||
return nil, errs.Combine(err, peer.Close())
|
||||
}
|
||||
}
|
||||
|
||||
return peer, nil
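For context, the registration above is the server half of the usual storj.io/drpc pattern. A client-side sketch, assuming the standard DRPC codegen naming (NewDRPCPayoutClient) and a hypothetical address and import path; the payout RPC methods themselves are not part of this diff, so none are invoked here:

    package main

    import (
    	"net"

    	"storj.io/drpc/drpcconn"

    	"storj.io/storj/private/multinodepb" // assumed import path
    )

    func main() {
    	// Hypothetical storage node DRPC address.
    	rawconn, err := net.Dial("tcp", "127.0.0.1:7777")
    	if err != nil {
    		panic(err)
    	}
    	conn := drpcconn.New(rawconn)
    	defer conn.Close()

    	client := multinodepb.NewDRPCPayoutClient(conn)
    	_ = client // payout RPC methods are defined in multinodepb, not shown in this diff
    }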
@ -27,7 +27,6 @@ type DB interface {
type Stats struct {
SatelliteID storj.NodeID

Uptime Metric
Audit Metric
OnlineScore float64

@ -25,13 +25,6 @@ func TestReputationDBGetInsert(t *testing.T) {

stats := reputation.Stats{
SatelliteID: testrand.NodeID(),
Uptime: reputation.Metric{
TotalCount: 1,
SuccessCount: 2,
Alpha: 3,
Beta: 4,
Score: 5,
},
Audit: reputation.Metric{
TotalCount: 6,
SuccessCount: 7,
@ -70,7 +63,6 @@ func TestReputationDBGetInsert(t *testing.T) {
assert.Equal(t, res.OnlineScore, stats.OnlineScore)
assert.Nil(t, res.AuditHistory)

compareReputationMetric(t, &res.Uptime, &stats.Uptime)
compareReputationMetric(t, &res.Audit, &stats.Audit)
})
})
@ -87,13 +79,6 @@ func TestReputationDBGetAll(t *testing.T) {

rep := reputation.Stats{
SatelliteID: testrand.NodeID(),
Uptime: reputation.Metric{
TotalCount: int64(i + 1),
SuccessCount: int64(i + 2),
Alpha: float64(i + 3),
Beta: float64(i + 4),
Score: float64(i + 5),
},
Audit: reputation.Metric{
TotalCount: int64(i + 6),
SuccessCount: int64(i + 7),
@ -137,7 +122,6 @@ func TestReputationDBGetAll(t *testing.T) {
assert.Equal(t, rep.OnlineScore, stats[0].OnlineScore)
assert.Nil(t, rep.AuditHistory)

compareReputationMetric(t, &rep.Uptime, &stats[0].Uptime)
compareReputationMetric(t, &rep.Audit, &stats[0].Audit)
}
}
@ -160,7 +144,6 @@ func TestReputationDBGetInsertAuditHistory(t *testing.T) {

stats := reputation.Stats{
SatelliteID: testrand.NodeID(),
Uptime: reputation.Metric{},
Audit: reputation.Metric{},
AuditHistory: &pb.AuditHistory{
Score: 0.5,
@ -1838,6 +1838,61 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration {
`ALTER TABLE reputation ADD COLUMN audit_history BLOB`,
},
},
{
DB: &db.reputationDB.DB,
Description: "drop uptime columns",
Version: 48,
Action: migrate.Func(func(ctx context.Context, _ *zap.Logger, rdb tagsql.DB, rtx tagsql.Tx) (err error) {
_, err = rtx.Exec(ctx, `
CREATE TABLE reputation_new (
satellite_id BLOB NOT NULL,
audit_success_count INTEGER NOT NULL,
audit_total_count INTEGER NOT NULL,
audit_reputation_alpha REAL NOT NULL,
audit_reputation_beta REAL NOT NULL,
audit_reputation_score REAL NOT NULL,
audit_unknown_reputation_alpha REAL NOT NULL,
audit_unknown_reputation_beta REAL NOT NULL,
audit_unknown_reputation_score REAL NOT NULL,
online_score REAL NOT NULL,
audit_history BLOB,
disqualified_at TIMESTAMP,
updated_at TIMESTAMP NOT NULL,
suspended_at TIMESTAMP,
offline_suspended_at TIMESTAMP,
offline_under_review_at TIMESTAMP,
joined_at TIMESTAMP NOT NULL,
PRIMARY KEY (satellite_id)
);
INSERT INTO reputation_new SELECT
satellite_id,
audit_success_count,
audit_total_count,
audit_reputation_alpha,
audit_reputation_beta,
audit_reputation_score,
audit_unknown_reputation_alpha,
audit_unknown_reputation_beta,
audit_unknown_reputation_score,
online_score,
audit_history,
disqualified_at,
updated_at,
suspended_at,
offline_suspended_at,
offline_under_review_at,
joined_at
FROM reputation;
DROP TABLE reputation;
ALTER TABLE reputation_new RENAME TO reputation;
`)
if err != nil {
return errs.Wrap(err)
}

return nil
}),
},
},
}
}
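The migration above uses the standard SQLite workaround for dropping columns: SQLite before 3.35 has no ALTER TABLE ... DROP COLUMN, so the table is rebuilt in four steps (create the new shape, copy the surviving columns, drop the old table, rename). A condensed sketch of the same dance with toy table and column names, using database/sql and the mattn/go-sqlite3 driver (the real migration runs inside the transaction that migrate.Func provides):

    package main

    import (
    	"database/sql"

    	_ "github.com/mattn/go-sqlite3"
    )

    // dropColumn rebuilds table t without the "dropped" column, since old
    // SQLite versions cannot drop a column in place. Toy schema.
    func dropColumn(db *sql.DB) error {
    	_, err := db.Exec(`
    		CREATE TABLE t_new (id INTEGER PRIMARY KEY, kept TEXT NOT NULL);
    		INSERT INTO t_new SELECT id, kept FROM t;
    		DROP TABLE t;
    		ALTER TABLE t_new RENAME TO t;
    	`)
    	return err
    }

    func main() {
    	db, err := sql.Open("sqlite3", ":memory:")
    	if err != nil {
    		panic(err)
    	}
    	defer db.Close()

    	if _, err := db.Exec(`CREATE TABLE t (id INTEGER PRIMARY KEY, kept TEXT NOT NULL, dropped TEXT)`); err != nil {
    		panic(err)
    	}
    	if err := dropColumn(db); err != nil {
    		panic(err)
    	}
    }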
@ -32,11 +32,6 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err

query := `INSERT OR REPLACE INTO reputation (
satellite_id,
uptime_success_count,
uptime_total_count,
uptime_reputation_alpha,
uptime_reputation_beta,
uptime_reputation_score,
audit_success_count,
audit_total_count,
audit_reputation_alpha,
@ -53,7 +48,7 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err
offline_under_review_at,
updated_at,
joined_at
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`

// ensure we insert utc
if stats.DisqualifiedAt != nil {
@ -83,11 +78,6 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err

_, err = db.ExecContext(ctx, query,
stats.SatelliteID,
stats.Uptime.SuccessCount,
stats.Uptime.TotalCount,
stats.Uptime.Alpha,
stats.Uptime.Beta,
stats.Uptime.Score,
stats.Audit.SuccessCount,
stats.Audit.TotalCount,
stats.Audit.Alpha,
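A small hazard in the hunk above: dropping the five uptime columns shrinks the column list from 22 to 17, and the VALUES clause must shrink in lockstep, since a placeholder/argument mismatch only fails at runtime. A toy sketch (not part of the commit) of deriving the placeholders from the column list so the two cannot drift apart:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // placeholders returns n comma-separated SQL placeholders (n >= 1).
    func placeholders(n int) string {
    	return "?" + strings.Repeat(",?", n-1)
    }

    func main() {
    	columns := []string{"satellite_id", "audit_success_count", "audit_total_count"}
    	query := "INSERT OR REPLACE INTO reputation (" + strings.Join(columns, ",") +
    		") VALUES(" + placeholders(len(columns)) + ")"
    	fmt.Println(query)
    	// INSERT OR REPLACE INTO reputation (satellite_id,audit_success_count,audit_total_count) VALUES(?,?,?)
    }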
@ -118,12 +108,7 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r
}

row := db.QueryRowContext(ctx,
`SELECT uptime_success_count,
uptime_total_count,
uptime_reputation_alpha,
uptime_reputation_beta,
uptime_reputation_score,
audit_success_count,
`SELECT audit_success_count,
audit_total_count,
audit_reputation_alpha,
audit_reputation_beta,
@ -145,11 +130,6 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r

var auditHistoryBytes []byte
err = row.Scan(
&stats.Uptime.SuccessCount,
&stats.Uptime.TotalCount,
&stats.Uptime.Alpha,
&stats.Uptime.Beta,
&stats.Uptime.Score,
&stats.Audit.SuccessCount,
&stats.Audit.TotalCount,
&stats.Audit.Alpha,
@ -188,11 +168,6 @@ func (db *reputationDB) All(ctx context.Context) (_ []reputation.Stats, err erro
defer mon.Task()(&ctx)(&err)

query := `SELECT satellite_id,
uptime_success_count,
uptime_total_count,
uptime_reputation_alpha,
uptime_reputation_beta,
uptime_reputation_score,
audit_success_count,
audit_total_count,
audit_reputation_alpha,
@ -222,11 +197,6 @@ func (db *reputationDB) All(ctx context.Context) (_ []reputation.Stats, err erro
var stats reputation.Stats

err := rows.Scan(&stats.SatelliteID,
&stats.Uptime.SuccessCount,
&stats.Uptime.TotalCount,
&stats.Uptime.Alpha,
&stats.Uptime.Beta,
&stats.Uptime.Score,
&stats.Audit.SuccessCount,
&stats.Audit.TotalCount,
&stats.Audit.Alpha,
@ -606,31 +606,6 @@ func Schema() map[string]*dbschema.Schema {
Type: "TIMESTAMP",
IsNullable: false,
},
&dbschema.Column{
Name: "uptime_reputation_alpha",
Type: "REAL",
IsNullable: false,
},
&dbschema.Column{
Name: "uptime_reputation_beta",
Type: "REAL",
IsNullable: false,
},
&dbschema.Column{
Name: "uptime_reputation_score",
Type: "REAL",
IsNullable: false,
},
&dbschema.Column{
Name: "uptime_success_count",
Type: "INTEGER",
IsNullable: false,
},
&dbschema.Column{
Name: "uptime_total_count",
Type: "INTEGER",
IsNullable: false,
},
},
},
},

@ -62,6 +62,7 @@ var States = MultiDBStates{
&v45,
&v46,
&v47,
&v48,
},
}
storagenode/storagenodedb/testdata/v48.go (vendored, new file, 50 lines)
@ -0,0 +1,50 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package testdata

import "storj.io/storj/storagenode/storagenodedb"

var v48 = MultiDBState{
Version: 48,
DBStates: DBStates{
storagenodedb.UsedSerialsDBName: v47.DBStates[storagenodedb.UsedSerialsDBName],
storagenodedb.StorageUsageDBName: v47.DBStates[storagenodedb.StorageUsageDBName],
storagenodedb.ReputationDBName: &DBState{
SQL: `
-- table to store nodestats cache
CREATE TABLE reputation (
satellite_id BLOB NOT NULL,
audit_success_count INTEGER NOT NULL,
audit_total_count INTEGER NOT NULL,
audit_reputation_alpha REAL NOT NULL,
audit_reputation_beta REAL NOT NULL,
audit_reputation_score REAL NOT NULL,
audit_unknown_reputation_alpha REAL NOT NULL,
audit_unknown_reputation_beta REAL NOT NULL,
audit_unknown_reputation_score REAL NOT NULL,
online_score REAL NOT NULL,
audit_history BLOB,
disqualified_at TIMESTAMP,
updated_at TIMESTAMP NOT NULL,
suspended_at TIMESTAMP,
offline_suspended_at TIMESTAMP,
offline_under_review_at TIMESTAMP,
joined_at TIMESTAMP NOT NULL,
PRIMARY KEY (satellite_id)
);
INSERT INTO reputation VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,1.0,1.0,1.0,1.0,1.0,1.0,1.0,NULL,'2019-07-19 20:00:00+00:00','2019-08-23 20:00:00+00:00',NULL,NULL,NULL,'1970-01-01 00:00:00+00:00');
`,
},
storagenodedb.PieceSpaceUsedDBName: v47.DBStates[storagenodedb.PieceSpaceUsedDBName],
storagenodedb.PieceInfoDBName: v47.DBStates[storagenodedb.PieceInfoDBName],
storagenodedb.PieceExpirationDBName: v47.DBStates[storagenodedb.PieceExpirationDBName],
storagenodedb.OrdersDBName: v47.DBStates[storagenodedb.OrdersDBName],
storagenodedb.BandwidthDBName: v47.DBStates[storagenodedb.BandwidthDBName],
storagenodedb.SatellitesDBName: v47.DBStates[storagenodedb.SatellitesDBName],
storagenodedb.DeprecatedInfoDBName: v47.DBStates[storagenodedb.DeprecatedInfoDBName],
storagenodedb.NotificationsDBName: v47.DBStates[storagenodedb.NotificationsDBName],
storagenodedb.HeldAmountDBName: v47.DBStates[storagenodedb.HeldAmountDBName],
storagenodedb.PricingDBName: v47.DBStates[storagenodedb.PricingDBName],
storagenodedb.APIKeysDBName: v47.DBStates[storagenodedb.APIKeysDBName]},
}
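The testdata snapshot above pins the expected reputation schema for version 48, with all other databases inherited unchanged from v47. Conceptually, the migration tests apply every migration step and compare the live schema against these snapshots; a heavily hedged sketch of that shape, with every helper name hypothetical:

    // Every name below is hypothetical; this only illustrates the shape of
    // the check, not storj's actual test harness.
    func TestMigrationV48(t *testing.T) {
    	db := openTestStoragenodeDB(t) // hypothetical helper
    	defer db.Close()

    	require.NoError(t, runMigrations(db, 48))              // hypothetical: apply steps up to v48
    	require.Equal(t, v48ExpectedSchema(), dumpSchema(db))  // hypothetical helpers
    }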
web/multinode/package-lock.json (generated, 1132 lines; diff not shown because it is too large)
@ -5,7 +5,7 @@
"serve": "vue-cli-service serve",
"lint": "vue-cli-service lint && stylelint '**/*.{vue,scss}' --fix",
"build": "vue-cli-service build",
"debug": "vue-cli-service build --mode development",
"dev": "vue-cli-service build --mode development",
"test": "vue-cli-service test:unit"
},
"dependencies": {
@ -23,7 +23,7 @@
"@vue/cli-service": "4.5.9",
"babel-core": "6.26.3",
"core-js": "3.8.1",
"node-sass": "4.14.1",
"sass": "^1.32.0",
"sass-loader": "8.0.0",
"stylelint": "13.8.0",
"stylelint-config-standard": "20.0.0",
web/multinode/src/api/index.ts (new file, 29 lines)
@ -0,0 +1,29 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

/**
 * UnauthorizedError is a custom error type for unauthorized operations.
 */
export class UnauthorizedError extends Error {
public constructor(message: string = 'authorization required') {
super(message);
}
}

/**
 * BadRequestError is a custom error type for bad requests.
 */
export class BadRequestError extends Error {
public constructor(message: string = 'bad request') {
super(message);
}
}

/**
 * InternalError is a custom error type for internal server errors.
 */
export class InternalError extends Error {
public constructor(message: string = 'internal server error') {
super(message);
}
}
web/multinode/src/api/nodes.ts (new file, 206 lines)
@ -0,0 +1,206 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

import { BadRequestError, InternalError, UnauthorizedError } from '@/api/index';
import { CreateNodeFields, Node, NodeURL } from '@/nodes';
import { HttpClient } from '@/private/http/client';

/**
 * client for the nodes controller of the MND API.
 */
export class NodesClient {
private readonly http: HttpClient = new HttpClient();
private readonly ROOT_PATH: string = '/api/v0/nodes';

/**
 * handles node addition.
 *
 * @param node - node to add.
 *
 * @throws {@link BadRequestError}
 * Thrown if the input is not valid.
 *
 * @throws {@link UnauthorizedError}
 * Thrown if the auth cookie is missing or invalid.
 *
 * @throws {@link InternalError}
 * Thrown if something goes wrong on server side.
 */
public async add(node: CreateNodeFields): Promise<void> {
const path = `${this.ROOT_PATH}`;
const response = await this.http.post(path, JSON.stringify(node));

if (!response.ok) {
await this.handleError(response);
}
}

/**
 * returns a list of node infos.
 *
 * @throws {@link BadRequestError}
 * Thrown if the input is not valid.
 *
 * @throws {@link UnauthorizedError}
 * Thrown if the auth cookie is missing or invalid.
 *
 * @throws {@link InternalError}
 * Thrown if something goes wrong on server side.
 */
public async list(): Promise<Node[]> {
const path = `${this.ROOT_PATH}/infos`;
const response = await this.http.get(path);

if (!response.ok) {
await this.handleError(response);
}

const nodeListJson = await response.json();

return nodeListJson.map(node => new Node(
node.id,
node.name,
node.version,
new Date(node.lastContact),
node.diskSpaceUsed,
node.diskSpaceLeft,
node.bandwidthUsed,
0,
0,
0,
node.totalEarned,
));
}

/**
 * returns a list of node infos by satellite.
 *
 * @param satelliteId - id of the satellite.
 *
 * @throws {@link BadRequestError}
 * Thrown if the input is not valid.
 *
 * @throws {@link UnauthorizedError}
 * Thrown if the auth cookie is missing or invalid.
 *
 * @throws {@link InternalError}
 * Thrown if something goes wrong on server side.
 */
public async listBySatellite(satelliteId: string): Promise<Node[]> {
const path = `${this.ROOT_PATH}/infos/${satelliteId}`;
const response = await this.http.get(path);

if (!response.ok) {
await this.handleError(response);
}

const nodeListJson = await response.json();

return nodeListJson.map(node => new Node(
node.id,
node.name,
node.version,
new Date(node.lastContact),
0,
0,
0,
node.onlineScore,
node.auditScore,
node.suspensionScore,
node.totalEarned,
));
}

/**
 * updates a node's name.
 *
 * @param id - id of the node.
 * @param name - new node name.
 *
 * @throws {@link BadRequestError}
 * Thrown if the input is not valid.
 *
 * @throws {@link UnauthorizedError}
 * Thrown if the auth cookie is missing or invalid.
 *
 * @throws {@link InternalError}
 * Thrown if something goes wrong on server side.
 */
public async updateName(id: string, name: string): Promise<void> {
const path = `${this.ROOT_PATH}/${id}`;
const response = await this.http.patch(path, JSON.stringify({name: name}));

if (!response.ok) {
await this.handleError(response);
}
}

/**
 * deletes node.
 *
 * @param id - id of the node.
 *
 * @throws {@link BadRequestError}
 * Thrown if the input is not valid.
 *
 * @throws {@link UnauthorizedError}
 * Thrown if the auth cookie is missing or invalid.
 *
 * @throws {@link InternalError}
 * Thrown if something goes wrong on server side.
 */
public async delete(id: string): Promise<void> {
const path = `${this.ROOT_PATH}/${id}`;
const response = await this.http.delete(path);

if (!response.ok) {
await this.handleError(response);
}
}

/**
 * retrieves the list of trusted satellite node URLs for a node.
 */
public async trustedSatellites(): Promise<NodeURL[]> {
const path = `${this.ROOT_PATH}/trusted-satellites`;
const response = await this.http.get(path);

if (!response.ok) {
await this.handleError(response);
}

const urlListJson = await response.json();

return urlListJson.map(url => new NodeURL(
url.ID,
url.Name,
));
}

/**
 * handles an error based on the response status code.
 * @param response - response from server.
 *
 * @throws {@link BadRequestError}
 * Thrown if the request is malformed.
 *
 * @throws {@link UnauthorizedError}
 * Thrown if the auth cookie is missing or invalid.
 *
 * @throws {@link InternalError}
 * Thrown if something goes wrong on server side.
 *
 * @private
 */
private async handleError(response: Response): Promise<void> {
const body = await response.json();

switch (response.status) {
case 401: throw new UnauthorizedError(body.error);
case 400: throw new BadRequestError(body.error);
case 500:
default:
throw new InternalError(body.error);
}
}
}
@ -5,7 +5,7 @@
<div class="input-container">
<div v-if="!isOptional" class="label-container">
<div class="label-container__main">
<div v-if="error">
<div v-if="error" class="label-container__main__error-icon-container">
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="20" height="20" rx="10" fill="#EB5757"/>
<path d="M10.0012 11.7364C10.612 11.7364 11.1117 11.204 11.1117 10.5532V5.81218C11.1117 5.75302 11.108 5.68991 11.1006 5.63074C11.0192 5.06672 10.5565 4.62891 10.0012 4.62891C9.39037 4.62891 8.89062 5.16138 8.89062 5.81218V10.5492C8.89062 11.204 9.39037 11.7364 10.0012 11.7364Z" fill="white"/>
@ -33,6 +33,8 @@
wrap="hard"
@input="onInput"
@change="onInput"
@paste.prevent="onPaste"
autocomplete="off"
v-model="value">
</textarea>
<input
@ -43,7 +45,9 @@
:type="[isPassword ? 'password': 'text']"
@input="onInput"
@change="onInput"
@paste.prevent="onPaste"
v-model="value"
autocomplete="off"
:style="style.inputStyle"
/>
</div>
@ -99,13 +103,13 @@ export default class HeaderedInput extends HeaderlessInput {
display: flex;
justify-content: flex-start;
align-items: center;
margin-bottom: 8px;

&__label {
font-family: 'font_regular', sans-serif;
font-size: 16px;
line-height: 21px;
color: var(--c-gray);
margin-bottom: 8px;
}

&__error {
@ -114,6 +118,14 @@ export default class HeaderedInput extends HeaderlessInput {
color: var(--c-error);
margin-left: 10px;
}

&__error-icon-container {
width: 20px;
height: 20px;
max-width: 20px;
max-height: 20px;
display: flex;
}
}

&__limit {
@ -23,6 +23,7 @@
:class="{'inputError' : error, 'password': isPassword}"
@input="onInput"
@change="onInput"
@paste.prevent="onPaste"
v-model="value"
:placeholder="placeholder"
:type="type"
@ -133,6 +134,18 @@ export default class HeaderlessInput extends Vue {
this.$emit('setData', this.value);
}

public onPaste(event): void {
const clipped: string = event.clipboardData.getData('text');

if (clipped.length > this.maxSymbols) {
this.value = clipped.slice(0, this.maxSymbols);
} else {
this.value = clipped;
}

this.$emit('setData', this.value);
}

/**
 * Triggers input type between text and password to show/hide symbols.
 */
@ -4,14 +4,15 @@
<template>
<div
class="dropdown"
@click.self="toggleOptions"
@click.stop="toggleOptions"
:class="{ active: areOptionsShown }"
v-if="options.length"
>
<span class="label">{{ selectedOption.label }}</span>
<div class="dropdown__selection" v-show="areOptionsShown">
<div class="dropdown__selection" v-if="areOptionsShown" v-click-outside="closeOptions">
<div class="dropdown__selection__overflow-container">
<div v-for="option in allOptions" :key="option.label" class="dropdown__selection__option" @click="onOptionClick(option)">
<span class="dropdown__selection__option__label">{{ option.label }}</span>
<div v-for="option in options" :key="option.label" class="dropdown__selection__option" @click="onOptionClick(option)">
<span class="label">{{ option.label }}</span>
</div>
</div>
</div>
@ -19,37 +20,43 @@
</template>

<script lang="ts">
import { Component, Prop, Vue, Watch } from 'vue-property-decorator';
import { Component, Prop, Vue } from 'vue-property-decorator';

/**
 * OptionClick defines on click callback type for VDropdown Option.
 */
export type OptionClick = (id?: string) => Promise<void>;

/**
 * Option is a representation of VDropdown item.
 */
export class Option {
public constructor(
public label: string = '',
public onClick: Function = () => { return; },
public label: string = 'no options',
public onClick: OptionClick = async (id) => Promise.resolve(),
) {}
}

@Component
export default class VDropdown extends Vue {
@Prop({default: 'All'})
private readonly allLabel: string;
@Prop({default: () => { return; }})
private readonly onAllClick: Function;
@Prop({default: []})
private readonly options: Option[];

public areOptionsShown: boolean = false;

@Watch('options')
public allOptions: Option[] = [ new Option(this.allLabel, this.onAllClick), ...this.options ];
public selectedOption: Option;

@Watch('options')
public selectedOption: Option = this.allOptions[0];
public created(): void {
this.selectedOption = this.options[0];
}

public toggleOptions(): void {
this.areOptionsShown = !this.areOptionsShown;
}

public closeOptions(): void {
if (!this.areOptionsShown) return;

this.areOptionsShown = false;
}

@ -59,8 +66,8 @@ export default class VDropdown extends Vue {
 * @param option
 */
public async onOptionClick(option: Option): Promise<void> {
await option.onClick();
this.selectedOption = option;
await option.onClick();
this.closeOptions();
}
}
@ -68,6 +75,7 @@ export default class VDropdown extends Vue {

<style lang="scss">
.dropdown {
position: relative;
box-sizing: border-box;
width: 300px;
height: 40px;
@ -85,6 +93,7 @@ export default class VDropdown extends Vue {

&:hover {
border-color: var(--c-gray);
color: var(--c-title);
}

&.active {
@ -99,6 +108,8 @@ export default class VDropdown extends Vue {
border: 1px solid var(--c-gray--light);
border-radius: 6px;
overflow: hidden;
background: white;
z-index: 999;

&__overflow-container {
overflow: overlay;
@ -112,9 +123,6 @@ export default class VDropdown extends Vue {
justify-content: flex-start;
padding: 0 16px;
height: 40px;
white-space: nowrap;
text-overflow: ellipsis;
overflow: hidden;
width: 100% !important;
cursor: pointer;
border-bottom: 1px solid var(--c-gray--light);
@ -126,6 +134,12 @@ export default class VDropdown extends Vue {
}
}

.label {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}

::-webkit-scrollbar {
width: 3px;
}
@ -7,22 +7,31 @@
<v-modal v-if="isAddNewNodeModalShown" @close="closeModal">
<h2 slot="header">Add New Node</h2>
<div class="add-new-node__body" slot="body">
<headered-input
class="add-new-node__body__input"
label="Node ID"
placeholder="Enter Node ID"
:error="idError"
@setData="setNodeId"
/>
<headered-input
class="add-new-node__body__input"
label="Public IP Address"
placeholder="Enter Public IP Address"
:error="publicIPError"
@setData="setPublicIP"
/>
<headered-input
class="add-new-node__body__input"
label="API Key"
/>
<headered-input
class="add-new-node__body__input"
label="Displayed Name"
placeholder="Enter API Key"
:error="apiKeyError"
@setData="setApiKey"
/>
</div>
<div class="add-new-node__footer" slot="footer">
<v-button label="Cancel" :is-white="true" width="205px" :on-press="closeModal" />
<v-button label="Continue" width="205px" />
<v-button label="Create" width="205px" :on-press="onCreate"/>
</div>
</v-modal>
</div>
@ -36,6 +45,8 @@ import HeaderedInput from '@/app/components/common/HeaderedInput.vue';
import VButton from '@/app/components/common/VButton.vue';
import VModal from '@/app/components/common/VModal.vue';

import { CreateNodeFields } from '@/nodes';

@Component({
components: {
VButton,
@ -45,14 +56,92 @@ import VModal from '@/app/components/common/VModal.vue';
})
export default class AddNewNode extends Vue {
public isAddNewNodeModalShown: boolean = false;
private nodeToAdd: CreateNodeFields = new CreateNodeFields();

private isLoading: boolean = false;
// errors
private idError: string = '';
private publicIPError: string = '';
private apiKeyError: string = '';

public openModal(): void {
this.isAddNewNodeModalShown = true;
}

public closeModal(): void {
this.nodeToAdd = new CreateNodeFields();
this.idError = '';
this.publicIPError = '';
this.apiKeyError = '';
this.isLoading = false;
this.isAddNewNodeModalShown = false;
}

/**
 * Sets node id field from value string.
 */
public setNodeId(value: string): void {
this.nodeToAdd.id = value.trim();
this.idError = '';
}

/**
 * Sets node public ip field from value string.
 */
public setPublicIP(value: string): void {
this.nodeToAdd.publicAddress = value.trim();
this.publicIPError = '';
}

/**
 * Sets API key field from value string.
 */
public setApiKey(value: string): void {
this.nodeToAdd.apiSecret = value.trim();
this.apiKeyError = '';
}

public async onCreate(): Promise<void> {
if (this.isLoading) return;

this.isLoading = true;

if (!this.validateFields()) {
this.isLoading = false;

return;
}

try {
await this.$store.dispatch('nodes/add', this.nodeToAdd);
} catch (error) {
console.error(error.message);
this.isLoading = false;
}

this.closeModal();
}

private validateFields(): boolean {
let hasNoErrors: boolean = true;

if (!this.nodeToAdd.id) {
this.idError = 'This field is required. Please enter a valid node ID';
hasNoErrors = false;
}

if (!this.nodeToAdd.publicAddress) {
this.publicIPError = 'This field is required. Please enter a valid node Public Address';
hasNoErrors = false;
}

if (!this.nodeToAdd.apiSecret) {
this.apiKeyError = 'This field is required. Please enter a valid API Key';
hasNoErrors = false;
}

return hasNoErrors;
}
}
</script>
Some files were not shown because too many files have changed in this diff.