Merge 'main' branch

Change-Id: Ia0db1b1f9ef3e0671d3f2208881b0abc3064e200
Michał Niewrzał 2021-01-04 12:13:27 +01:00
commit ad3e3a38c5
84 changed files with 1850 additions and 1157 deletions


@@ -13,8 +13,8 @@ Please describe the performance impact:
- [ ] NEW: Are there any Satellite database migrations? Are they forwards _and_ backwards compatible?
- [ ] Does the PR describe what changes are being made?
- [ ] Does the PR describe why the changes are being made?
- [ ] Does the code follow [our style guide](https://github.com/storj/docs/blob/master/code/Style.md)?
- [ ] Does the code follow [our testing guide](https://github.com/storj/docs/blob/master/code/Testing.md)?
- [ ] Does the code follow [our style guide](https://github.com/storj/docs/blob/main/code/Style.md)?
- [ ] Does the code follow [our testing guide](https://github.com/storj/docs/blob/main/code/Testing.md)?
- [ ] Is the PR appropriately sized? (If it could be broken into smaller PRs it should be)
- [ ] Does the new code have enough tests? (*every* PR should have tests or justification otherwise. Bug-fix PRs especially)
- [ ] Does the new code have enough documentation that answers "how do I use it?" and "what does it do?"? (both source documentation and [higher level](https://github.com/storj/docs), diagrams?)

Jenkinsfile

@@ -10,7 +10,7 @@ node('node') {
echo "Current build result: ${currentBuild.result}"
}
if (env.BRANCH_NAME == "master") {
if (env.BRANCH_NAME == "main") {
stage('Run Versions Test') {
lastStage = env.STAGE_NAME
try {
@@ -30,8 +30,8 @@ node('node') {
done
'''
sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
// fetch the remote master branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master'
// fetch the remote main branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6'
}
catch(err){
@@ -65,8 +65,8 @@ node('node') {
done
'''
sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
// fetch the remote master branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master'
// fetch the remote main branch
sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres -e CC=gcc storjlabs/golang:1.15.6'
}
catch(err){


@@ -2,7 +2,7 @@ pipeline {
agent {
docker {
label 'main'
image docker.build("storj-ci", "--pull https://github.com/storj/ci.git").id
image docker.build("storj-ci", "--pull git://github.com/storj/ci.git#main").id
args '-u root:root --cap-add SYS_PTRACE -v "/tmp/gomod":/go/pkg/mod -v "/tmp/npm":/npm --tmpfs "/tmp:exec,mode=777"'
}
}
@@ -69,7 +69,7 @@ pipeline {
STORJ_TEST_COCKROACH_ALT = 'cockroach://root@localhost:26260/testcockroach?sslmode=disable'
STORJ_TEST_POSTGRES = 'postgres://postgres@localhost/teststorj?sslmode=disable'
STORJ_TEST_DATABASES = 'crdb|pgx|cockroach://root@localhost:26259/testmetabase?sslmode=disable;pg|pgx|postgres://postgres@localhost/testmetabase?sslmode=disable'
COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/versioncontrol/...'}"
COVERFLAGS = "${ env.BRANCH_NAME != 'main' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/versioncontrol/...'}"
}
steps {
sh 'cockroach sql --insecure --host=localhost:26256 -e \'create database testcockroach;\''


@@ -4,7 +4,7 @@ GOARCH ?= amd64
GOPATH ?= $(shell go env GOPATH)
COMPOSE_PROJECT_NAME := ${TAG}-$(shell git rev-parse --abbrev-ref HEAD)
BRANCH_NAME ?= $(shell git rev-parse --abbrev-ref HEAD | sed "s!/!-!g")
ifeq (${BRANCH_NAME},master)
ifeq (${BRANCH_NAME},main)
TAG := $(shell git rev-parse --short HEAD)-go${GO_VERSION}
TRACKED_BRANCH := true
LATEST_TAG := latest
@@ -353,5 +353,5 @@ diagrams-graphml:
.PHONY: bump-dependencies
bump-dependencies:
go get storj.io/common@master storj.io/private@master storj.io/uplink@master
go get storj.io/common@main storj.io/private@main storj.io/uplink@main
go mod tidy


@@ -2,10 +2,10 @@
[![Go Report Card](https://goreportcard.com/badge/storj.io/storj)](https://goreportcard.com/report/storj.io/storj)
[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://pkg.go.dev/storj.io/storj)
[![Coverage Status](https://img.shields.io/badge/coverage-master-green.svg)](https://build.dev.storj.io/job/storj/job/master/cobertura)
[![Coverage Status](https://img.shields.io/badge/coverage-master-green.svg)](https://build.dev.storj.io/job/storj/job/main/cobertura)
![Alpha](https://img.shields.io/badge/version-alpha-green.svg)
<img src="https://github.com/storj/storj/raw/master/resources/logo.png" width="100">
<img src="https://github.com/storj/storj/raw/main/resources/logo.png" width="100">
Storj is building a decentralized cloud storage network.
[Check out our white paper for more info!](https://storj.io/white-paper)
@@ -92,7 +92,7 @@ Use Git to push your changes to your fork:
```bash
git commit -a -m 'my changes!'
git push origin master
git push origin main
```
Use Github to open a pull request!


@@ -90,7 +90,7 @@ func networkExec(flags *Flags, args []string, command string) error {
if command == "setup" {
if flags.Postgres == "" {
return errors.New("postgres connection URL is required for running storj-sim. Example: `storj-sim network setup --postgres=<connection URL>`.\nSee docs for more details https://github.com/storj/docs/blob/master/Test-network.md#running-tests-with-postgres")
return errors.New("postgres connection URL is required for running storj-sim. Example: `storj-sim network setup --postgres=<connection URL>`.\nSee docs for more details https://github.com/storj/docs/blob/main/Test-network.md#running-tests-with-postgres")
}
identities, err := identitySetup(processes)


@@ -9,14 +9,11 @@ import (
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"github.com/btcsuite/btcutil/base58"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/fpath"
"storj.io/common/pb"
"storj.io/private/cfgstruct"
"storj.io/private/process"
@@ -26,7 +23,8 @@ import (
type registerConfig struct {
AuthService string `help:"the address to the service you wish to register your access with" default:"" basic-help:"true"`
Public bool `help:"if the access should be public" default:"false" basic-help:"true"`
AWSProfile string `help:"update AWS credentials file, appending the credentials using this profile name" default:"" basic-help:"true"`
Format string `help:"format of credentials, use 'env' or 'aws' for using in scripts" default:""`
AWSProfile string `help:"if using --format=aws, output the --profile tag using this profile" default:""`
AccessConfig
}
@@ -61,7 +59,7 @@ func init() {
registerCmd := &cobra.Command{
Use: "register [ACCESS]",
Short: "Register your access for use with a hosted gateway.",
RunE: registerAccess,
RunE: accessRegister,
Args: cobra.MaximumNArgs(1),
}
@@ -90,25 +88,9 @@ func accessList(cmd *cobra.Command, args []string) (err error) {
}
func accessInspect(cmd *cobra.Command, args []string) (err error) {
var access *uplink.Access
if len(args) == 0 {
access, err = inspectCfg.GetAccess()
if err != nil {
return err
}
} else {
firstArg := args[0]
access, err = inspectCfg.GetNamedAccess(firstArg)
if err != nil {
return err
}
if access == nil {
if access, err = uplink.ParseAccess(firstArg); err != nil {
return err
}
}
access, err := getAccessFromArgZeroOrConfig(inspectCfg, args)
if err != nil {
return errs.New("no access specified: %w", err)
}
serializedAccesss, err := access.Serialize()
@@ -149,112 +131,95 @@ func parseAccess(access string) (sa string, apiKey string, ea string, err error)
return p.SatelliteAddr, apiKey, ea, nil
}
func registerAccess(cmd *cobra.Command, args []string) (err error) {
if len(args) == 0 {
return errs.New("no access specified")
func accessRegister(cmd *cobra.Command, args []string) (err error) {
access, err := getAccessFromArgZeroOrConfig(inspectCfg, args)
if err != nil {
return errs.New("no access specified: %w", err)
}
_, err = register(args[0], registerCfg.AuthService, registerCfg.Public)
return err
accessKey, secretKey, endpoint, err := RegisterAccess(access, registerCfg.AuthService, registerCfg.Public)
if err != nil {
return err
}
switch registerCfg.Format {
case "env": // export / set compatible format
fmt.Printf("AWS_ACCESS_KEY_ID=%s\n", accessKey)
fmt.Printf("AWS_SECRET_ACCESS_KEY=%s\n", secretKey)
// note that AWS_ENDPOINT configuration is not natively utilized by the AWS CLI
fmt.Printf("AWS_ENDPOINT=%s\n", endpoint)
case "aws": // aws configuration commands
profile := ""
if registerCfg.AWSProfile != "" {
profile = " --profile " + registerCfg.AWSProfile
fmt.Printf("aws configure %s\n", profile)
}
fmt.Printf("aws configure %s set aws_access_key_id %s\n", profile, accessKey)
fmt.Printf("aws configure %s set aws_secret_access_key %s\n", profile, secretKey)
// note that this configuration is not natively utilized by the AWS CLI
fmt.Printf("aws configure %s set s3.endpoint_url %s\n", profile, endpoint)
default: // plain text
fmt.Println("========== CREDENTIALS ===================================================================")
fmt.Println("Access Key ID: ", accessKey)
fmt.Println("Secret Key : ", secretKey)
fmt.Println("Endpoint : ", endpoint)
}
return nil
}
func register(accessRaw, authService string, public bool) (accessKey string, err error) {
if authService == "" {
return "", errs.New("no auth service address provided")
}
// try assuming that accessRaw is a named access
access, err := registerCfg.GetNamedAccess(accessRaw)
if err == nil && access != nil {
accessRaw, err = access.Serialize()
func getAccessFromArgZeroOrConfig(config AccessConfig, args []string) (access *uplink.Access, err error) {
if len(args) != 0 {
access, err = inspectCfg.GetNamedAccess(args[0])
if err != nil {
return "", errs.New("error serializing named access '%s': %w", accessRaw, err)
return nil, err
}
if access != nil {
return access, nil
}
return uplink.ParseAccess(args[0])
}
return inspectCfg.GetAccess()
}
// RegisterAccess registers an access grant with a Gateway Authorization Service.
func RegisterAccess(access *uplink.Access, authService string, public bool) (accessKey, secretKey, endpoint string, err error) {
if authService == "" {
return "", "", "", errs.New("no auth service address provided")
}
accesssSerialized, err := access.Serialize()
if err != nil {
return "", "", "", errs.Wrap(err)
}
postData, err := json.Marshal(map[string]interface{}{
"access_grant": accessRaw,
"access_grant": accesssSerialized,
"public": public,
})
if err != nil {
return accessKey, errs.Wrap(err)
return accessKey, "", "", errs.Wrap(err)
}
resp, err := http.Post(fmt.Sprintf("%s/v1/access", authService), "application/json", bytes.NewReader(postData))
if err != nil {
return "", err
return "", "", "", err
}
defer func() { err = errs.Combine(err, resp.Body.Close()) }()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
return "", "", "", err
}
respBody := make(map[string]string)
if err := json.Unmarshal(body, &respBody); err != nil {
return "", errs.New("unexpected response from auth service: %s", string(body))
return "", "", "", errs.New("unexpected response from auth service: %s", string(body))
}
accessKey, ok := respBody["access_key_id"]
if !ok {
return "", errs.New("access_key_id missing in response")
return "", "", "", errs.New("access_key_id missing in response")
}
secretKey, ok := respBody["secret_key"]
secretKey, ok = respBody["secret_key"]
if !ok {
return "", errs.New("secret_key missing in response")
return "", "", "", errs.New("secret_key missing in response")
}
fmt.Println("========== CREDENTIALS ===================================================================")
fmt.Println("Access Key ID: ", accessKey)
fmt.Println("Secret Key : ", secretKey)
fmt.Println("Endpoint : ", respBody["endpoint"])
// update AWS credential file if requested
if registerCfg.AWSProfile != "" {
credentialsPath, err := getAwsCredentialsPath()
if err != nil {
return "", err
}
err = writeAWSCredentials(credentialsPath, registerCfg.AWSProfile, accessKey, secretKey)
if err != nil {
return "", err
}
}
return accessKey, nil
}
// getAwsCredentialsPath returns the expected AWS credentials path.
func getAwsCredentialsPath() (string, error) {
if credentialsPath, found := os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE"); found {
return credentialsPath, nil
}
homeDir, err := os.UserHomeDir()
if err != nil {
return "", errs.Wrap(err)
}
return filepath.Join(homeDir, ".aws", "credentials"), nil
}
// writeAWSCredentials appends to credentialsPath using an AWS compliant credential formatting.
func writeAWSCredentials(credentialsPath, profileName, accessKey, secretKey string) error {
oldCredentials, err := ioutil.ReadFile(credentialsPath)
if err != nil && !os.IsNotExist(err) {
return errs.Wrap(err)
}
const format = "\n[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\n"
newCredentials := fmt.Sprintf(format, profileName, accessKey, secretKey)
var fileMode os.FileMode
fileInfo, err := os.Stat(credentialsPath)
if err == nil {
fileMode = fileInfo.Mode()
} else {
fileMode = 0644
}
err = fpath.AtomicWriteFile(credentialsPath, append(oldCredentials, newCredentials...), fileMode)
if err != nil {
return errs.Wrap(err)
}
fmt.Printf("Updated AWS credential file %s with profile '%s'\n", credentialsPath, profileName)
return nil
return accessKey, secretKey, respBody["endpoint"], nil
}


@@ -0,0 +1,38 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information
package cmd_test
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/cmd/uplink/cmd"
"storj.io/storj/private/testplanet"
)
func TestRegisterAccess(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// mock the auth service
ts := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, `{"access_key_id":"1", "secret_key":"2", "endpoint":"3"}`)
}))
defer ts.Close()
// make sure we get back things
access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
accessKey, secretKey, endpoint, err := cmd.RegisterAccess(access, ts.URL, true)
require.NoError(t, err)
assert.Equal(t, "1", accessKey)
assert.Equal(t, "2", secretKey)
assert.Equal(t, "3", endpoint)
})
}


@@ -130,12 +130,13 @@ func importMain(cmd *cobra.Command, args []string) (err error) {
}
accessData = newAccessData
}
accesses[name] = accessData
// There is no easy way currently to save off a "hidden" configurable into
// the config file without a larger refactoring. For now, just do a manual
// override of the accesses.
// override of the access.
// TODO: revisit when the configuration/flag code makes it easy
if err := saveConfig(process.SaveConfigWithOverride("accesses", accesses)); err != nil {
accessKey := "accesses." + name
if err := saveConfig(process.SaveConfigWithOverride(accessKey, accessData)); err != nil {
return err
}


@@ -24,11 +24,11 @@ import (
var shareCfg struct {
DisallowReads bool `default:"false" help:"if true, disallow reads" basic-help:"true"`
DisallowWrites bool `default:"false" help:"if true, disallow writes. see also readonly" basic-help:"true"`
DisallowWrites bool `default:"false" help:"if true, disallow writes. see also --readonly" basic-help:"true"`
DisallowLists bool `default:"false" help:"if true, disallow lists" basic-help:"true"`
DisallowDeletes bool `default:"false" help:"if true, disallow deletes. see also readonly" basic-help:"true"`
Readonly bool `default:"true" help:"implies disallow_writes and disallow_deletes. you must specify --readonly=false if you don't want this" basic-help:"true"`
Writeonly bool `default:"false" help:"implies disallow_reads and disallow_lists" basic-help:"true"`
DisallowDeletes bool `default:"false" help:"if true, disallow deletes. see also --readonly" basic-help:"true"`
Readonly bool `default:"true" help:"implies --disallow-writes and --disallow-deletes. you must specify --readonly=false if you don't want this" basic-help:"true"`
Writeonly bool `default:"false" help:"implies --disallow-reads and --disallow-lists" basic-help:"true"`
NotBefore string `help:"disallow access before this time (e.g. '+2h', '2020-01-02T15:01:01-01:00')" basic-help:"true"`
NotAfter string `help:"disallow access after this time (e.g. '+2h', '2020-01-02T15:01:01-01:00')" basic-help:"true"`
AllowedPathPrefix []string `help:"whitelist of path prefixes to require, overrides the [allowed-path-prefix] arguments"`
@@ -61,7 +61,7 @@
}
func shareMain(cmd *cobra.Command, args []string) (err error) {
newAccessData, sharePrefixes, permission, err := createAccessGrant(args)
newAccess, newAccessData, sharePrefixes, permission, err := createAccessGrant(args)
if err != nil {
return err
}
@@ -70,7 +70,7 @@ func shareMain(cmd *cobra.Command, args []string) (err error) {
if shareCfg.Register || shareCfg.URL || shareCfg.DNS != "" {
isPublic := (shareCfg.Public || shareCfg.URL || shareCfg.DNS != "")
accessKey, err = register(newAccessData, shareCfg.AuthService, isPublic)
accessKey, _, _, err = RegisterAccess(newAccess, shareCfg.AuthService, isPublic)
if err != nil {
return err
}
@@ -105,15 +105,15 @@ func shareMain(cmd *cobra.Command, args []string) (err error) {
}
// Creates access grant for allowed path prefixes.
func createAccessGrant(args []string) (newAccessData string, sharePrefixes []sharePrefixExtension, permission uplink.Permission, err error) {
func createAccessGrant(args []string) (newAccess *uplink.Access, newAccessData string, sharePrefixes []sharePrefixExtension, permission uplink.Permission, err error) {
now := time.Now()
notBefore, err := parseHumanDate(shareCfg.NotBefore, now)
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
notAfter, err := parseHumanDate(shareCfg.NotAfter, now)
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
if len(shareCfg.AllowedPathPrefix) == 0 {
@@ -128,10 +128,10 @@ func createAccessGrant(args []string) (newAccessData string, sharePrefixes []sha
for _, path := range shareCfg.AllowedPathPrefix {
p, err := fpath.New(path)
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
if p.IsLocal() {
return newAccessData, sharePrefixes, permission, errs.New("required path must be remote: %q", path)
return newAccess, newAccessData, sharePrefixes, permission, errs.New("required path must be remote: %q", path)
}
uplinkSharePrefix := uplink.SharePrefix{
@@ -147,7 +147,7 @@ func createAccessGrant(args []string) (newAccessData string, sharePrefixes []sha
access, err := shareCfg.GetAccess()
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
permission = uplink.Permission{}
@@ -158,19 +158,19 @@ func createAccessGrant(args []string) (newAccessData string, sharePrefixes []sha
permission.NotBefore = notBefore
permission.NotAfter = notAfter
newAccess, err := access.Share(permission, uplinkSharePrefixes...)
newAccess, err = access.Share(permission, uplinkSharePrefixes...)
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
newAccessData, err = newAccess.Serialize()
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
satelliteAddr, _, _, err := parseAccess(newAccessData)
if err != nil {
return newAccessData, sharePrefixes, permission, err
return newAccess, newAccessData, sharePrefixes, permission, err
}
fmt.Println("Sharing access to satellite", satelliteAddr)
@@ -185,7 +185,7 @@ func createAccessGrant(args []string) (newAccessData string, sharePrefixes []sha
fmt.Println("=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========")
fmt.Println("Access :", newAccessData)
return newAccessData, sharePrefixes, permission, nil
return newAccess, newAccessData, sharePrefixes, permission, nil
}
// Creates linksharing url for allowed path prefixes.


@@ -47,7 +47,7 @@ The current Satellite database has the table `nodes`. For the offline time calcu
### Detecting offline nodes
Per [Kademlia removal blueprint](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md#network-refreshing), any storage node has to ping the satellite every hour. For storage nodes that have not pinged, we need to contact them directly.
Per [Kademlia removal blueprint](https://github.com/storj/storj/blob/main/docs/design/kademlia-removal.md#network-refreshing), any storage node has to ping the satellite every hour. For storage nodes that have not pinged, we need to contact them directly.
To find the storage nodes that have gone offline, we run a chore with the following query:
@@ -229,4 +229,4 @@ Data Science could use this approach to more nicely calculate statistics however
* The design needs to account for potential satellite or DNS outages to ensure that we do not unfairly disqualify nodes if the satellite cannot be contacted.
* The design indefinitely checks offline storage nodes until they are disqualified.
* The implementation requires coordination with the team working on the [Kademlia removal blueprint](kademlia-removal.md) for the "ping" functionality.
* The implementation requires the [Kademlia removal network refreshing](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md#network-refreshing) to be implemented and deployed before deploying the new chore. Use a feature flag for removing the constraint.
* The implementation requires the [Kademlia removal network refreshing](https://github.com/storj/storj/blob/main/docs/design/kademlia-removal.md#network-refreshing) to be implemented and deployed before deploying the new chore. Use a feature flag for removing the constraint.


@@ -29,7 +29,7 @@ Reference: [Redash query](https://redash.datasci.storj.io/queries/1224) for curr
## Design
#### GC Manager and Workers
The idea here is to split the garbage collection process into a manager process and many worker processes. The GC Manager will join the metainfo loop to retrieve pointer data. It will assign a portion of the storage nodes to each of the GC workers so that each worker is only responsible for creating bloom filters for a subset of all storage nodes. The GC master will send the piece IDs from the metainfo loop to the correct worker responsible for that storage node. Once the GC cycle is complete, the workers send the completed bloom filters to the storage nodes.
The idea here is to split the garbage collection process into a manager process and many worker processes. The GC Manager will join the metainfo loop to retrieve pointer data. It will assign a portion of the storage nodes to each of the GC workers so that each worker is only responsible for creating bloom filters for a subset of all storage nodes. The GC main will send the piece IDs from the metainfo loop to the correct worker responsible for that storage node. Once the GC cycle is complete, the workers send the completed bloom filters to the storage nodes.
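For illustration only, here is a minimal Go sketch (none of these names are from this commit) of how a manager could deterministically route every piece ID for a given storage node to the same worker:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// workerFor maps a storage node ID to one of numWorkers workers, so every
// piece ID belonging to that node is routed to the same bloom-filter builder.
func workerFor(nodeID string, numWorkers int) int {
	h := fnv.New32a()
	h.Write([]byte(nodeID))
	return int(h.Sum32() % uint32(numWorkers))
}

func main() {
	for _, id := range []string{"node-a", "node-b", "node-c"} {
		fmt.Printf("%s -> worker %d\n", id, workerFor(id, 4))
	}
}
```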
#### Reliable data transfer mechanism
With this design, the GC Manager will be sending piece ID data to the workers, so it's very important we never lose a piece ID. We need a way to confirm each worker received every piece ID from the GC Manager for any given GC iteration. Otherwise it could cause the storage node to delete unintended data. One way to solve this is to assign a sequence number to each piece ID sent from GC Manager to the worker, so that at the end of the GC cycle the manager and worker can confirm that the end sequence numbers match and all sequences in between have been received.
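A rough sketch of that confirmation scheme, assuming a simple in-order channel; the message type and receiver here are hypothetical, not from the codebase:

```go
package main

import "fmt"

// pieceMsg is a hypothetical manager-to-worker message: a piece ID tagged
// with a monotonically increasing sequence number.
type pieceMsg struct {
	seq     uint64
	pieceID string
}

// receiver detects a lost or reordered message before the worker's bloom
// filter is ever sent to a storage node.
type receiver struct{ next uint64 }

func (r *receiver) receive(m pieceMsg) error {
	if m.seq != r.next {
		return fmt.Errorf("gap: expected seq %d, got %d", r.next, m.seq)
	}
	r.next++
	return nil
}

// finish checks the end-of-cycle sequence number the manager reports.
func (r *receiver) finish(endSeq uint64) error {
	if r.next != endSeq {
		return fmt.Errorf("incomplete: received %d messages, manager sent %d", r.next, endSeq)
	}
	return nil
}

func main() {
	r := &receiver{}
	for i, id := range []string{"p1", "p2", "p3"} {
		if err := r.receive(pieceMsg{seq: uint64(i), pieceID: id}); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println("cycle complete:", r.finish(3) == nil)
}
```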


@@ -34,19 +34,19 @@ Metainfo is responsible for all things related to the metainfo stored for each f
4) metainfo service, which ensures proper access to metainfo database and simplifies modification for other services.
#### orders
Orders is responsible for creating/managing orders that the satellite issues for a file upload/download. Orders are used to keep track of how much bandwidth was used to upload/download a file. This data is used to pay storage nodes for bandwidth usage and to charge the uplinks. See this [doc on the lifecycle of data](https://github.com/storj/docs/blob/master/code/payments/Accounting.md#lifecycle-of-the-data) related to accounting which includes orders.
Orders is responsible for creating/managing orders that the satellite issues for a file upload/download. Orders are used to keep track of how much bandwidth was used to upload/download a file. This data is used to pay storage nodes for bandwidth usage and to charge the uplinks. See this [doc on the lifecycle of data](https://github.com/storj/docs/blob/main/code/payments/Accounting.md#lifecycle-of-the-data) related to accounting which includes orders.
#### audit
Audit performs audits of the storage nodes to make sure the data they store is still retrievable. The audit system is currently made up of an audit service that runs on an interval, performing audits on one segment at a time. The results of the audits are reported to the overlay service to store in the nodes table in satellite.DB. See [docs on audit](https://github.com/storj/docs/blob/master/code/audits/audit-service.md) for more details.
Audit performs audits of the storage nodes to make sure the data they store is still retrievable. The audit system is currently made up of an audit service that runs on an interval, performing audits on one segment at a time. The results of the audits are reported to the overlay service to store in the nodes table in satellite.DB. See [docs on audit](https://github.com/storj/docs/blob/main/code/audits/audit-service.md) for more details.
#### repair
Repair searches metainfo for injured segments and adds them to the repair queue. The repairer picks segments from the queue and tries to fix them. When repair fails, the segment is added to the irreparable database. The repair system is currently made of 4 parts and 2 DBs (db tables). The 4 parts are 1) repair observer (contains ReliabilityCache) 2) irreparable loop 3) repairer 4) repair queue. The 2 DBs are 1) the injuredsegment (repair queue) table in satellite.DB and 2) the irreparabledb table in satellite.DB.
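As a sketch of the checker-to-repairer hand-off described above (an in-memory stand-in for illustration, not the real satellite.DB-backed queue; all names hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// InjuredSegment is a hypothetical record for a segment whose healthy piece
// count has dropped below the repair threshold.
type InjuredSegment struct {
	Path          string
	HealthyPieces int
}

// memQueue stands in for the injuredsegment table: the checker inserts,
// repair workers select the next segment to fix.
type memQueue struct{ items []InjuredSegment }

func (q *memQueue) Insert(seg InjuredSegment) { q.items = append(q.items, seg) }

func (q *memQueue) Select() (InjuredSegment, error) {
	if len(q.items) == 0 {
		return InjuredSegment{}, errors.New("repair queue empty")
	}
	seg := q.items[0]
	q.items = q.items[1:]
	return seg, nil
}

func main() {
	q := &memQueue{}
	q.Insert(InjuredSegment{Path: "bucket/object/s0", HealthyPieces: 40})
	seg, _ := q.Select()
	fmt.Printf("repairing %s (healthy pieces: %d)\n", seg.Path, seg.HealthyPieces)
}
```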
#### garbage collection (GC)
GC iterates over the metainfo and creates a list for each storage node. It sends these lists to storage nodes, who can delete all pieces missing from the list. See [GC design doc](https://github.com/storj/storj/blob/master/docs/design/garbage-collection.md) for more details.
GC iterates over the metainfo and creates a list for each storage node. It sends these lists to storage nodes, who can delete all pieces missing from the list. See [GC design doc](https://github.com/storj/storj/blob/main/docs/design/garbage-collection.md) for more details.
#### accounting
Accounting calculates how much storage the uplinks use and how much data the storage nodes store. See [docs on accounting](https://github.com/storj/docs/blob/master/code/payments/Accounting.md) for more details.
Accounting calculates how much storage the uplinks use and how much data the storage nodes store. See [docs on accounting](https://github.com/storj/docs/blob/main/code/payments/Accounting.md) for more details.
#### console
Console provides the web UI for the Satellite where users can create new accounts/projects/apiKeys needed for uploading/downloading to the network.
@@ -64,7 +64,7 @@ Nodestats allows storage nodes to ask information about themselves from the sate
Inspectors allow private diagnostics on certain systems. The following inspectors currently exist: overlay inspector, health inspector, and irreparable inspector.
#### kademlia
Kademlia, discovery, bootstrap, and vouchers are being removed and not included in this doc. See [kademlia removal design doc](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md) for more details.
Kademlia, discovery, bootstrap, and vouchers are being removed and not included in this doc. See [kademlia removal design doc](https://github.com/storj/storj/blob/main/docs/design/kademlia-removal.md) for more details.
#### RPC endpoints
The Satellite has the following RPC endpoints:
@@ -80,8 +80,8 @@ The Satellite has the following HTTP endpoints:
All services (except version) make connections to the satellite.DB. Five services rely on the metainfo service to access the metainfoDB: inspectors, accounting, audit, repair, and garbage collection.
See these docs for details on current database design:
- https://github.com/storj/docs/blob/master/code/Database.md#database
- https://github.com/storj/docs/blob/master/code/persistentstorage/Databases.md
- https://github.com/storj/docs/blob/main/code/Database.md#database
- https://github.com/storj/docs/blob/main/code/persistentstorage/Databases.md
#### limitations
The current items that prevent Satellite horizontal scaling include:
@@ -108,7 +108,7 @@ The private api process handles all private RPC and HTTP requests, this includes
#### metainfo loop and the observer system
The metainfo loop process iterates over all the segments in metainfoDB repeatedly on an interval. With each loop, the process can also execute the code for the observer systems, which take a segment as input and perform some action with it. The observer systems currently include: audit observer, gc observer, repair checker observer, and accounting tally.
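A stripped-down sketch of that observer hook, with hypothetical types standing in for the real metainfo loop plumbing:

```go
package main

import "fmt"

// Segment is a hypothetical, pared-down view of what the metainfo loop
// hands to observers on each iteration.
type Segment struct {
	StorageNodes []string
	Size         int64
}

// Observer is the hook each system (audit, gc, checker, tally) implements.
type Observer interface {
	ProcessSegment(seg Segment) error
}

// tally sums stored bytes per node, roughly what accounting tally does.
type tally struct{ bytes map[string]int64 }

func (t *tally) ProcessSegment(seg Segment) error {
	for _, node := range seg.StorageNodes {
		t.bytes[node] += seg.Size
	}
	return nil
}

func main() {
	t := &tally{bytes: map[string]int64{}}
	observers := []Observer{t}
	for _, seg := range []Segment{{StorageNodes: []string{"n1", "n2"}, Size: 1024}} {
		for _, obs := range observers { // one loop pass fans each segment out
			_ = obs.ProcessSegment(seg)
		}
	}
	fmt.Println(t.bytes) // map[n1:1024 n2:1024]
}
```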
The audit observer uses the segments from the metainfo loop to create segment reservoir samples for each storage node and saves those samples to a reservoir cache. Audit observer currently runs on a 30s interval for the release default setting. See [audit-v2 design](https://github.com/storj/storj/blob/master/docs/design/audit-v2.md) for more details.
The audit observer uses the segments from the metainfo loop to create segment reservoir samples for each storage node and saves those samples to a reservoir cache. Audit observer currently runs on a 30s interval for the release default setting. See [audit-v2 design](https://github.com/storj/storj/blob/main/docs/design/audit-v2.md) for more details.
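The reservoir sampling the audit observer relies on can be sketched with standard algorithm R (hypothetical types, not the satellite's implementation):

```go
package main

import (
	"fmt"
	"math/rand"
)

// Reservoir keeps a uniform sample of at most k segment paths for one node,
// without knowing in advance how many segments the loop will yield.
type Reservoir struct {
	k     int
	seen  int
	items []string
}

func (r *Reservoir) Sample(rng *rand.Rand, path string) {
	r.seen++
	if len(r.items) < r.k {
		r.items = append(r.items, path)
		return
	}
	// Replace an existing sample with probability k/seen (algorithm R).
	if j := rng.Intn(r.seen); j < r.k {
		r.items[j] = path
	}
}

func main() {
	rng := rand.New(rand.NewSource(1))
	res := &Reservoir{k: 3}
	for i := 0; i < 100; i++ {
		res.Sample(rng, fmt.Sprintf("segment-%d", i))
	}
	fmt.Println(res.items) // three uniformly sampled segment paths
}
```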
The repair (checker) observer uses the segments from the metainfo loop to identify segments that need to be repaired and adds those injured segments to the repair queue. The repair checker currently has a `checkerObserver.ReliabilityCache`; this should be ok to stay in-memory for the time being since we plan on only running a single metainfo loop. The repair observer currently runs on a 30s interval for the release default setting.
@@ -131,7 +131,7 @@ Lets get rid of the irreparable loop. For one, we never expect there to be files
The repair worker executes a repair for an item in the repair queue. We want to work through the repair queue as fast as possible, so it's important to be able to dispatch many workers at a time.
#### audit workers
The audit process should be able to run many audits in parallel. See the [audit-v2 design doc](https://github.com/storj/storj/blob/master/docs/design/audit-v2.md) for updates to the audit design.
The audit process should be able to run many audits in parallel. See the [audit-v2 design doc](https://github.com/storj/storj/blob/main/docs/design/audit-v2.md) for updates to the audit design.
#### accounting
The accounting process is responsible for calculating disk usage and bandwidth usage. These calculations are used for uplink invoices and storage node payments. Accounting should receive storage node total stored bytes data from the tally observer running with the metainfo loop.


@@ -10,11 +10,11 @@ The previous implementation of uptime reputation consisted of a ratio of online
## Design
The solution proposed here is to use a series of sliding windows to indicate a general timeframe in which offline audits occur. Each window keeps two separate tallies indicating how many offline audits and total audits a particular node received within its timeframe. Once a window is complete, it is scored by calculating the percentage of total audits for which it was offline. We can average these scores over a trailing period of time, called the _tracking period_, to determine an overall "offline score" to be used for suspension and disqualification. By granting each individual window the same weight in the calculation of the overall average, the effect of any particularly unlucky period can be minimized while still allowing us to take the failures into account over a longer period.
The solution proposed here is to use a series of sliding windows to indicate a general timeframe in which offline audits occur. Each window keeps two separate tallies indicating how many online audits and total audits a particular node received within its timeframe. Once a window is complete, it is scored by calculating the percentage of total audits for which it was online. We can average these scores over a trailing period of time, called the _tracking period_, to determine an overall "online score" to be used for suspension and disqualification. By granting each individual window the same weight in the calculation of the overall average, the effect of any particularly unlucky period can be minimized while still allowing us to take the failures into account over a longer period.
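As a worked example of the scoring just described, a small Go sketch (hypothetical types; window bookkeeping and persistence omitted):

```go
package main

import "fmt"

// window is a hypothetical tally for one completed audit window.
type window struct{ online, total int }

// onlineScore averages per-window online percentages across the tracking
// period; every window gets equal weight regardless of its audit count.
func onlineScore(windows []window) float64 {
	var sum float64
	var n int
	for _, w := range windows {
		if w.total == 0 {
			continue // a window with no audits contributes no score
		}
		sum += float64(w.online) / float64(w.total)
		n++
	}
	if n == 0 {
		return 1 // no completed audits yet: treat as fully online
	}
	return sum / float64(n)
}

func main() {
	// One unlucky window (2/10 online) is diluted by three good ones.
	ws := []window{{10, 10}, {2, 10}, {9, 10}, {10, 10}}
	fmt.Printf("online score: %.3f\n", onlineScore(ws)) // 0.775
}
```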
Storage node downtime can have a range of causes. For those storage node operators who may have fallen victim to a temporary issue, we want to give them a chance to diagnose and fix it before disqualifying them for good. For this reason, we are introducing suspension as a component of disqualification.
Once a node's offline score has risen above an _offline threshold_, it is _suspended_ and enters a period of review. A suspended node will not receive any new pieces, but can continue to receive download and audit requests for the pieces it currently holds. However, its pieces are considered to be unhealthy. We repair a segment if it contains too many unhealthy pieces, at which point we may transfer the repaired pieces from a suspended node to a more reliable node. If at any point during the review period we find that a node's score has fallen below the offline threshold, it is unsuspended, or _reinstated_, but it remains _under review_. This prevents nodes from alternating between suspension and reinstatement without consequence.
Once a node's online score has fallen below an _offline threshold_, it is _suspended_ and enters a period of review. A suspended node will not receive any new pieces, but can continue to receive download and audit requests for the pieces it currently holds. However, its pieces are considered to be unhealthy. We repair a segment if it contains too many unhealthy pieces, at which point we may transfer the repaired pieces from a suspended node to a more reliable node. If at any point during the review period we find that a node's score has risen above the offline threshold, it is unsuspended, or _reinstated_, but it remains _under review_. This prevents nodes from alternating between suspension and reinstatement without consequence.
The review period consists of one _grace period_ and one _tracking period_. The _grace period_ is given to fix whatever issue is causing the downtime. After the grace period has expired, any offline audits will fall within the scope of the tracking period, and thus will be used in the node's final evaluation. If at the end of the review period, the node is still suspended, it is disqualified. Otherwise, the node is no longer _under review_.
@@ -67,7 +67,7 @@ CREATE TABLE audit_history (
```
type AuditResults struct {
Offline int
Online int
Total int
}
@@ -83,7 +83,7 @@ type AuditHistoryConfig struct {
WindowSize time.Duration `help:"the length of time spanning a single audit window."`
TrackingPeriod time.Duration `help:"the length of time to track audit windows for node suspension and disqualification."`
GracePeriod time.Duration `help:"The length of time to give suspended SNOs to diagnose and fix issues causing downtime. Afterwards, they will have one tracking period to reach the minimum online score before disqualification."`
OfflineThreshold float64 `help:"The point above which a node is punished for offline audits. Determined by calculating the percentage of offline audits within each window and finding the average across windows within the tracking period."`
OfflineThreshold float64 `help:"The point below which a node is penalized for offline audits. Determined by calculating the ratio of online/total audits within each window and finding the average across windows within the tracking period."`
}
```
@@ -95,11 +95,11 @@ Add a new argument to `BatchUpdateStats` to pass in `AuditWindowConfig`. This gi
For each node, take the `offline_suspended` and `under_review` values from the node dossier.
Select and unmarshal the node's entry in the audit history table.
If a value does not exist in the windows map for the current window, this means that the previous window is now complete, and we need to evaluate the node and delete any windows which have fallen out of scope.
We evaluate the node's current standing by finding what percentage of audits were offline per window, then finding the average score across all complete windows within the tracking period.
We evaluate the node's current standing by finding what percentage of audits were online per window, then finding the average score across all complete windows within the tracking period.
With this information, there are a number of conditions we need to evaluate:
1) The current offline score is above the OfflineThreshold
1) The current online score is above the OfflineThreshold
1a. The node is under review
@@ -111,7 +111,7 @@ With this information, there are a number of conditions we need to evaluate:
- The node is in good standing and does not need to be updated.
2) The current offline score is below the OfflineThreshold
2) The current online score is below the OfflineThreshold
2a. The node is under review
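To make the branching above concrete, a compressed sketch (hypothetical types; the grace and tracking periods are folded into a single review duration, and persistence is omitted):

```go
package main

import (
	"fmt"
	"time"
)

// nodeStatus is a hypothetical slice of the node dossier fields used here.
type nodeStatus struct {
	suspended        bool
	underReview      bool
	underReviewSince time.Time
}

// evaluate sketches the conditions outlined above: suspend below the
// threshold, reinstate above it, and disqualify a node that is still below
// the threshold once its review period has elapsed.
func evaluate(s *nodeStatus, score, threshold float64, review time.Duration, now time.Time) string {
	switch {
	case score >= threshold && !s.underReview:
		return "good standing"
	case score >= threshold: // under review: reinstate, keep reviewing
		s.suspended = false
		if now.Sub(s.underReviewSince) > review {
			s.underReview = false
			return "review passed"
		}
		return "reinstated, still under review"
	case !s.underReview: // below threshold: suspend and start review
		s.suspended, s.underReview, s.underReviewSince = true, true, now
		return "suspended, review started"
	default: // below threshold while under review
		if now.Sub(s.underReviewSince) > review {
			return "disqualified"
		}
		return "still suspended"
	}
}

func main() {
	now := time.Now()
	s := &nodeStatus{}
	fmt.Println(evaluate(s, 0.5, 0.6, 37*24*time.Hour, now))                      // suspended, review started
	fmt.Println(evaluate(s, 0.5, 0.6, 37*24*time.Hour, now.Add(38*24*time.Hour))) // disqualified
}
```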

go.mod

@@ -12,6 +12,7 @@ require (
github.com/go-redis/redis v6.15.9+incompatible
github.com/gogo/protobuf v1.3.1
github.com/golang-migrate/migrate/v4 v4.7.0
github.com/golang/mock v1.4.4 // indirect
github.com/google/go-cmp v0.5.2
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 // indirect
github.com/gorilla/mux v1.8.0
@@ -25,6 +26,7 @@ require (
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
github.com/onsi/ginkgo v1.14.0 // indirect
github.com/shopspring/decimal v1.2.0
github.com/spacemonkeygo/monkit/v3 v3.0.7
github.com/spf13/cobra v1.0.0
@@ -38,14 +40,15 @@ require (
go.etcd.io/bbolt v1.3.5
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.20.0 // indirect
google.golang.org/protobuf v1.25.0 // indirect
storj.io/common v0.0.0-20201207172416-78f4e59925c3
storj.io/common v0.0.0-20201210184814-6206aefd1d48
storj.io/drpc v0.0.16
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
storj.io/private v0.0.0-20201126162939-6fbb1e924f51
storj.io/uplink v1.3.2-0.20201204110139-e7cbbe88e717
storj.io/uplink v1.4.4-0.20210104090336-abe239c20ec8
)

go.sum

@@ -1,7 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
@@ -19,12 +17,7 @@ cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
@@ -47,7 +40,6 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZp
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.13.3 h1:kohgdtN58KW/r9ZDVmMJE3MrfbumwsDQStd0LPAGmmw=
github.com/alicebob/miniredis/v2 v2.13.3/go.mod h1:uS970Sw5Gs9/iK3yBg0l9Uj9s25wXxSpQUE9EaJ/Blg=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -63,7 +55,6 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@@ -75,14 +66,12 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/calebcase/tmpfile v1.0.2 h1:1AGuhKiUu4J6wxz6lxuF6ck3f8G2kaV6KSEny0RGCig=
github.com/calebcase/tmpfile v1.0.2/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE=
github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -101,7 +90,6 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@@ -135,7 +123,6 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
@@ -145,16 +132,12 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -178,15 +161,12 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -223,9 +203,6 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 h1:SRgJV+IoxM5MKyFdlSUeNy6/ycRUF2yBAKdAQswoHUk=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -242,10 +219,8 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34=
github.com/graphql-go/graphql v0.7.9/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -333,7 +308,6 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -361,7 +335,6 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -371,15 +344,9 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lucas-clemente/quic-go v0.7.1-0.20201102053916-272229abf044/go.mod h1:ZUygOqIoai0ASXXLJ92LTnKdbqh9MHCLTX6Nr1jUrK0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -400,7 +367,6 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -418,8 +384,6 @@ github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8d
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758=
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs=
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 h1:lh3PyZvY+B9nFliSGTn5uFuqQQJGuNrD0MLCokv09ag=
@ -443,7 +407,6 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
@ -456,7 +419,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@ -464,11 +426,9 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -479,39 +439,15 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@ -521,8 +457,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
@ -563,12 +497,9 @@ github.com/stripe/stripe-go v70.15.0+incompatible h1:hNML7M1zx8RgtepEMlxyu/FpVPr
github.com/stripe/stripe-go v70.15.0+incompatible/go.mod h1:A1dQZmO/QypXmsL0T8axYZkSN/uA/T/A64pfKdBAMiY=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
@ -601,7 +532,6 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@ -625,14 +555,10 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -643,7 +569,6 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -658,7 +583,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1Zcpyg
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -677,8 +601,6 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -687,7 +609,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -703,15 +624,12 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -726,7 +644,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -735,7 +652,6 @@ golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -762,13 +678,11 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c h1:/h0vtH0PyU0xAoZJVcRw1k0Ng+U0JAy3QDiFmppIlIE=
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
@ -776,7 +690,6 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -808,9 +721,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@ -822,7 +732,6 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -830,10 +739,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -847,8 +752,6 @@ google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZt
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@ -895,7 +798,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -904,15 +806,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
storj.io/common v0.0.0-20201124202331-31c1d1dc486d/go.mod h1:ocAfQaE1dpflrdTr8hXRZTWP1bq2jXz7ieGSBVCmHEc=
storj.io/common v0.0.0-20201207172416-78f4e59925c3 h1:D+rAQBzjl0Mw3VQ+1Sjv5/53I7JaIymMrkDW5DYBgRE=
storj.io/common v0.0.0-20201207172416-78f4e59925c3/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY=
storj.io/common v0.0.0-20201210184814-6206aefd1d48 h1:bxIYHG96eFQNsEazsICfiEHjFwo1YqqbXkGfg72d2mg=
storj.io/common v0.0.0-20201210184814-6206aefd1d48/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
@ -921,5 +818,5 @@ storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BX
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20201126162939-6fbb1e924f51 h1:3aNbTNJeZ00cnzgYFdGyBUxtBEYqBCEzvk6Svh0gIwc=
storj.io/private v0.0.0-20201126162939-6fbb1e924f51/go.mod h1:3KcGiA7phL3a0HUCe5ar90SlIU3iFb8hKInaEZQ5P7o=
storj.io/uplink v1.3.2-0.20201204110139-e7cbbe88e717 h1:a1VgBKF3XKs8NY4AMFyVMjWYCskExlN8OUmCBKHEjbc=
storj.io/uplink v1.3.2-0.20201204110139-e7cbbe88e717/go.mod h1:OmI3WhL5JFxBUt3v+q7RtW1RMndMvtHB5Dp6YN8ro3c=
storj.io/uplink v1.4.4-0.20210104090336-abe239c20ec8 h1:FEqhyAyUuE86cf4UX5jHmlkKKsk/dQ/YKlmyPm4fNCE=
storj.io/uplink v1.4.4-0.20210104090336-abe239c20ec8/go.mod h1:raBVCBf1/DwfkFNzKqjSKLPysk9+o8Ubt/LJIO9TVBw=

View File

@ -61,6 +61,8 @@ storj.io/storj/satellite/gracefulexit."graceful_exit_successful_pieces_transfer_
storj.io/storj/satellite/gracefulexit."graceful_exit_transfer_piece_fail" Meter
storj.io/storj/satellite/gracefulexit."graceful_exit_transfer_piece_success" Meter
storj.io/storj/satellite/metainfo."metainfo_rate_limit_exceeded" Event
storj.io/storj/satellite/metainfo/piecedeletion."delete_batch_size" IntVal
storj.io/storj/satellite/metainfo/piecedeletion."deletion_pieces_unhandled_count" IntVal
storj.io/storj/satellite/orders."download_failed_not_enough_pieces_uplink" Meter
storj.io/storj/satellite/repair/checker."checker_injured_segment_health" FloatVal
storj.io/storj/satellite/repair/checker."checker_segment_age" IntVal

View File

@ -33,10 +33,10 @@ type DB interface {
// ErrNoNode is a special error type that indicates the absence of a node in NodesDB.
var ErrNoNode = errs.Class("no such node")
// Node is a representation of storeganode, that SNO could add to the Multinode Dashboard.
// Node is a representation of a storagenode that an SNO could add to the Multinode Dashboard.
type Node struct {
ID storj.NodeID
// APISecret is a secret issued by storagenode, that will be main auth mechanism in MND <-> SNO api. is a secret issued by storagenode, that will be main auth mechanism in MND <-> SNO api.
// APISecret is a secret issued by the storagenode that will be the main auth mechanism in the MND <-> SNO API.
APISecret []byte
PublicAddress string
Name string

View File

@ -154,7 +154,17 @@ func TestRollupDeletes(t *testing.T) {
require.NotEmpty(t, dqedNodes)
// Set timestamp back by the number of days we want to save
initialTime := time.Now().UTC().AddDate(0, 0, -days)
now := time.Now().UTC()
initialTime := now.AddDate(0, 0, -days)
// TODO: this only runs the test for times that are farther back than
// an hour from midnight UTC. Something is wrong for the hour of
// 11pm-midnight UTC.
hour, _, _ := now.Clock()
if hour == 23 {
initialTime = initialTime.Add(-time.Hour)
}
currentTime := initialTime
nodeData := map[storj.NodeID]float64{}

View File

@ -341,7 +341,7 @@ func (server *Server) deleteUser(w http.ResponseWriter, r *http.Request) {
}
if len(invoices) > 0 {
for _, invoice := range invoices {
if invoice.Status != "paid" {
if invoice.Status == "draft" || invoice.Status == "open" {
httpJSONError(w, "user has unpaid/pending invoices",
"", http.StatusConflict)
return
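The narrowed guard above is deliberate: the previous check, invoice.Status != "paid", also tripped on terminal Stripe statuses such as "void" and "uncollectible", blocking deletion for users whose invoices will never collect. A minimal sketch of the intent, assuming Stripe's documented status set and using a hypothetical helper name:

func isPendingInvoice(status string) bool {
	// Only invoices that can still collect money should block deletion;
	// "draft" and "open" are pending, while "paid", "void", and
	// "uncollectible" are terminal.
	return status == "draft" || status == "open"
}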

View File

@ -251,7 +251,10 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup overlay
peer.Overlay.DB = peer.DB.OverlayCache()
peer.Overlay.Service = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
peer.Services.Add(lifecycle.Item{
Name: "overlay",
Close: peer.Overlay.Service.Close,
@ -633,6 +636,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Marketing.PartnersService,
peer.Console.Listener,
config.Payments.StripeCoinPayments.StripePublicKey,
peer.URL(),
)
peer.Servers.Add(lifecycle.Item{

View File

@ -30,6 +30,7 @@ import (
"golang.org/x/sync/errgroup"
"storj.io/common/errs2"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/web"
"storj.io/storj/satellite/console"
@ -103,6 +104,7 @@ type Server struct {
server http.Server
cookieAuth *consolewebauth.CookieAuth
rateLimiter *web.IPRateLimiter
nodeURL storj.NodeURL
stripePublicKey string
@ -119,7 +121,7 @@ type Server struct {
}
// NewServer creates new instance of console server.
func NewServer(logger *zap.Logger, config Config, service *console.Service, mailService *mailservice.Service, referralsService *referrals.Service, partners *rewards.PartnersService, listener net.Listener, stripePublicKey string) *Server {
func NewServer(logger *zap.Logger, config Config, service *console.Service, mailService *mailservice.Service, referralsService *referrals.Service, partners *rewards.PartnersService, listener net.Listener, stripePublicKey string, nodeURL storj.NodeURL) *Server {
server := Server{
log: logger,
config: config,
@ -130,6 +132,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, mail
partners: partners,
stripePublicKey: stripePublicKey,
rateLimiter: web.NewIPRateLimiter(config.RateLimit),
nodeURL: nodeURL,
}
logger.Debug("Starting Satellite UI.", zap.Stringer("Address", server.listener.Addr()))
@ -281,6 +284,7 @@ func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
var data struct {
SatelliteName string
SatelliteNodeURL string
SegmentIOPublicKey string
StripePublicKey string
VerificationPageURL string
@ -293,6 +297,7 @@ func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
}
data.SatelliteName = server.config.SatelliteName
data.SatelliteNodeURL = server.nodeURL.String()
data.SegmentIOPublicKey = server.config.SegmentIOPublicKey
data.StripePublicKey = server.stripePublicKey
data.VerificationPageURL = server.config.VerificationPageURL

View File

@ -222,7 +222,10 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup overlay
peer.Overlay.DB = peer.DB.OverlayCache()
peer.Overlay.Service = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
peer.Services.Add(lifecycle.Item{
Name: "overlay",
Close: peer.Overlay.Service.Close,

View File

@ -83,9 +83,9 @@ func (dialer *Dialer) Handle(ctx context.Context, node storj.NodeURL, queue Queu
for len(jobs) > 0 {
batch, promises, rest := batchJobs(jobs, dialer.piecesPerRequest)
// Aggregation metrics
mon.IntVal("delete batch size").Observe(int64(len(batch)))
mon.IntVal("delete_batch_size").Observe(int64(len(batch))) //mon:locked
// Tracing metrics
s.Annotate("delete batch size", strconv.Itoa(len(batch)))
s.Annotate("delete_batch_size", strconv.Itoa(len(batch)))
jobs = rest
@ -111,7 +111,7 @@ func (dialer *Dialer) Handle(ctx context.Context, node storj.NodeURL, queue Queu
}
break
} else {
mon.IntVal("deletion pieces unhandled count").Observe(resp.UnhandledCount)
mon.IntVal("deletion_pieces_unhandled_count").Observe(resp.UnhandledCount) //mon:locked
}
jobs = append(jobs, queue.PopAllWithoutClose()...)
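The renames above switch the metric keys from spaces to snake_case so they match the entries added to the locked-metrics list earlier in this commit, and the //mon:locked trailer is what the lock tooling scans for. A minimal sketch of the monkit v3 pattern, in a hypothetical example package with a hypothetical recordBatch helper:

package example

import (
	monkit "github.com/spacemonkeygo/monkit/v3"
)

var mon = monkit.Package()

func recordBatch(batchSize int) {
	// Observing an IntVal produces distribution statistics under a stable,
	// snake_case key; the trailing comment marks the metric as locked.
	mon.IntVal("delete_batch_size").Observe(int64(batchSize)) //mon:locked
}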

View File

@ -57,6 +57,12 @@ func (e *Endpoint) GetStats(ctx context.Context, req *pb.GetStatsRequest) (_ *pb
e.log.Error("overlay.Get failed", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
auditHistory, err := e.overlay.GetAuditHistory(ctx, peer.ID)
// if there is no audit history for the node, that's fine and we can continue with a nil auditHistory struct.
if err != nil && !overlay.ErrNodeNotFound.Has(err) {
e.log.Error("overlay.GetAuditHistory failed", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
auditScore := calculateReputationScore(
node.Reputation.AuditReputationAlpha,
@ -86,6 +92,7 @@ func (e *Endpoint) GetStats(ctx context.Context, req *pb.GetStatsRequest) (_ *pb
Suspended: node.UnknownAuditSuspended,
OfflineSuspended: node.OfflineSuspended,
OfflineUnderReview: node.OfflineUnderReview,
AuditHistory: overlay.AuditHistoryToPB(auditHistory),
JoinedAt: node.CreatedAt,
}, nil
}
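The ErrNodeNotFound filter above is the zeebo/errs class pattern also used for ErrNoNode earlier in this commit: a class wraps the errors it creates, and Has reports membership, letting callers treat "record missing" as benign. A minimal self-contained sketch with hypothetical names:

package example

import "github.com/zeebo/errs"

var ErrNotFound = errs.Class("not found")

func lookup(ok bool) error {
	if !ok {
		return ErrNotFound.New("no audit history")
	}
	return nil
}

func tolerantLookup(ok bool) error {
	if err := lookup(ok); err != nil && !ErrNotFound.Has(err) {
		return err // only unexpected failures propagate
	}
	return nil // a missing record is fine; continue with a nil history
}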

View File

@ -0,0 +1,74 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package overlay
import (
"context"
"time"
"storj.io/common/pb"
"storj.io/common/storj"
)
// AuditHistoryDB implements the database for audit history.
//
// architecture: Database
type AuditHistoryDB interface {
// UpdateAuditHistory updates a node's audit history with an online or offline audit.
UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config AuditHistoryConfig) (*UpdateAuditHistoryResponse, error)
// GetAuditHistory gets a node's audit history.
GetAuditHistory(ctx context.Context, nodeID storj.NodeID) (*AuditHistory, error)
}
// AuditHistory represents a node's audit history for the most recent tracking period.
type AuditHistory struct {
Score float64
Windows []*AuditWindow
}
// UpdateAuditHistoryResponse contains information returned by UpdateAuditHistory.
type UpdateAuditHistoryResponse struct {
NewScore float64
TrackingPeriodFull bool
}
// AuditWindow represents the number of online and total audits a node received for a specific time period.
type AuditWindow struct {
WindowStart time.Time
TotalCount int32
OnlineCount int32
}
// UpdateAuditHistory updates a node's audit history with an online or offline audit.
func (service *Service) UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool) (res *UpdateAuditHistoryResponse, err error) {
defer mon.Task()(&ctx)(&err)
return service.db.UpdateAuditHistory(ctx, nodeID, auditTime, online, service.config.AuditHistory)
}
// GetAuditHistory gets a node's audit history.
func (service *Service) GetAuditHistory(ctx context.Context, nodeID storj.NodeID) (auditHistory *AuditHistory, err error) {
defer mon.Task()(&ctx)(&err)
return service.db.GetAuditHistory(ctx, nodeID)
}
// AuditHistoryToPB converts an overlay.AuditHistory to a pb.AuditHistory.
func AuditHistoryToPB(auditHistory *AuditHistory) (historyPB *pb.AuditHistory) {
if auditHistory == nil {
return nil
}
historyPB = &pb.AuditHistory{
Score: auditHistory.Score,
Windows: make([]*pb.AuditWindow, len(auditHistory.Windows)),
}
for i, window := range auditHistory.Windows {
historyPB.Windows[i] = &pb.AuditWindow{
TotalCount: window.TotalCount,
OnlineCount: window.OnlineCount,
WindowStart: window.WindowStart,
}
}
return historyPB
}
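For callers outside the overlay package, the new service methods and AuditHistoryToPB compose naturally. A minimal caller-side sketch, assuming an already-constructed *overlay.Service (the package and function names are hypothetical):

package example

import (
	"context"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/storj/satellite/overlay"
)

func auditHistoryPB(ctx context.Context, service *overlay.Service, id storj.NodeID) (*pb.AuditHistory, error) {
	history, err := service.GetAuditHistory(ctx, id)
	if err != nil {
		// Callers that tolerate a missing history can filter the error
		// here, as the node-stats endpoint does with ErrNodeNotFound.
		return nil, err
	}
	// AuditHistoryToPB accepts nil and returns a nil protobuf message.
	return overlay.AuditHistoryToPB(history), nil
}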

View File

@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb_test
package overlay_test
import (
"testing"
@ -13,62 +13,71 @@ import (
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/overlay"
)
func TestAuditHistoryBasic(t *testing.T) {
var auditHistoryConfig overlay.AuditHistoryConfig
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Overlay.AuditHistory.WindowSize = time.Hour
config.Overlay.AuditHistory.TrackingPeriod = 2 * time.Hour
auditHistoryConfig = config.Overlay.AuditHistory
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
node := planet.StorageNodes[0]
cache := planet.Satellites[0].DB.OverlayCache()
auditHistoryConfig := planet.Satellites[0].Config.Overlay.AuditHistory
cache := planet.Satellites[0].Overlay.Service
startingWindow := time.Now().Truncate(time.Hour)
windowsInTrackingPeriod := int(auditHistoryConfig.TrackingPeriod.Seconds() / auditHistoryConfig.WindowSize.Seconds())
currentWindow := startingWindow
// online score should be 1 until the first window is finished
history, err := cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(2*time.Minute), false, auditHistoryConfig)
res, err := cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(2*time.Minute), false)
require.NoError(t, err)
require.EqualValues(t, 1, history.GetScore())
require.EqualValues(t, 1, res.NewScore)
require.False(t, res.TrackingPeriodFull)
history, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(20*time.Minute), true, auditHistoryConfig)
res, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(20*time.Minute), true)
require.NoError(t, err)
require.EqualValues(t, 1, history.GetScore())
require.EqualValues(t, 1, res.NewScore)
require.False(t, res.TrackingPeriodFull)
// move to next window
currentWindow = currentWindow.Add(time.Hour)
// online score should now be 0.5 since the first window is complete with one online audit and one offline audit
history, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(2*time.Minute), false, auditHistoryConfig)
res, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(2*time.Minute), false)
require.NoError(t, err)
require.EqualValues(t, 0.5, history.GetScore())
require.EqualValues(t, 0.5, res.NewScore)
require.False(t, res.TrackingPeriodFull)
history, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(20*time.Minute), true, auditHistoryConfig)
res, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(20*time.Minute), true)
require.NoError(t, err)
require.EqualValues(t, 0.5, history.GetScore())
require.EqualValues(t, 0.5, res.NewScore)
require.False(t, res.TrackingPeriodFull)
// move to next window
currentWindow = currentWindow.Add(time.Hour)
// try to add an audit for an old window, expect error
_, err = cache.UpdateAuditHistory(ctx, node.ID(), startingWindow, true, auditHistoryConfig)
_, err = cache.UpdateAuditHistory(ctx, node.ID(), startingWindow, true)
require.Error(t, err)
// add another online audit for the latest window; score should still be 0.5
history, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow, true, auditHistoryConfig)
res, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow, true)
require.NoError(t, err)
require.EqualValues(t, 0.5, history.GetScore())
require.EqualValues(t, 0.5, res.NewScore)
// now that we have two full windows other than the current one, tracking period should be considered full.
require.True(t, res.TrackingPeriodFull)
// add another online audit for the latest window; score should still be 0.5
history, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(45*time.Minute), true, auditHistoryConfig)
res, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(45*time.Minute), true)
require.NoError(t, err)
require.EqualValues(t, 0.5, history.GetScore())
require.EqualValues(t, 0.5, res.NewScore)
require.True(t, res.TrackingPeriodFull)
currentWindow = currentWindow.Add(time.Hour)
// in the current state, there are windowsInTrackingPeriod windows with a score of 0.5
@ -76,8 +85,9 @@ func TestAuditHistoryBasic(t *testing.T) {
// window gets included in the tracking period, and the earliest 0.5 window gets dropped.
expectedScore := (0.5*float64(windowsInTrackingPeriod-1) + 1) / float64(windowsInTrackingPeriod)
// add online audit for next window; score should now be expectedScore
history, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(time.Minute), true, auditHistoryConfig)
res, err = cache.UpdateAuditHistory(ctx, node.ID(), currentWindow.Add(time.Minute), true)
require.NoError(t, err)
require.EqualValues(t, expectedScore, history.GetScore())
require.EqualValues(t, expectedScore, res.NewScore)
require.True(t, res.TrackingPeriodFull)
})
}

View File

@ -448,12 +448,13 @@ func BenchmarkNodeSelection(b *testing.B) {
}
})
service := overlay.NewService(zap.NewNop(), overlaydb, overlay.Config{
service, err := overlay.NewService(zap.NewNop(), overlaydb, overlay.Config{
Node: nodeSelectionConfig,
NodeSelectionCache: overlay.CacheConfig{
Staleness: time.Hour,
},
})
require.NoError(b, err)
b.Run("FindStorageNodesWithPreference", func(b *testing.B) {
for i := 0; i < b.N; i++ {

View File

@ -26,6 +26,12 @@ type Config struct {
AuditHistory AuditHistoryConfig
}
// AsOfSystemTimeConfig is a configuration struct to enable 'AS OF SYSTEM TIME' for CRDB queries.
type AsOfSystemTimeConfig struct {
Enabled bool `help:"enables the use of the AS OF SYSTEM TIME feature in CRDB" default:"true"`
DefaultInterval time.Duration `help:"default duration for AS OF SYSTEM TIME" devDefault:"-1µs" releaseDefault:"-10s"`
}
// NodeSelectionConfig is a configuration struct to determine the minimum
// values for nodes to select.
type NodeSelectionConfig struct {
@ -44,6 +50,8 @@ type NodeSelectionConfig struct {
AuditReputationDQ float64 `help:"the reputation cut-off for disqualifying SNs based on audit history" default:"0.6"`
SuspensionGracePeriod time.Duration `help:"the time period that must pass before suspended nodes will be disqualified" releaseDefault:"168h" devDefault:"1h"`
SuspensionDQEnabled bool `help:"whether nodes will be disqualified if they have been suspended for longer than the suspended grace period" releaseDefault:"false" devDefault:"true"`
AsOfSystemTime AsOfSystemTimeConfig
}
// AuditHistoryConfig is a configuration struct defining time periods and thresholds for penalizing nodes for being offline.
@ -55,3 +63,16 @@ type AuditHistoryConfig struct {
OfflineThreshold float64 `help:"The point below which a node is punished for offline audits. Determined by calculating the ratio of online/total audits within each window and finding the average across windows within the tracking period." default:"0.6"`
OfflineDQEnabled bool `help:"whether nodes will be disqualified if they have low online score after a review period" releaseDefault:"false" devDefault:"true"`
}
func (aost *AsOfSystemTimeConfig) isValid() error {
if aost.Enabled {
if aost.DefaultInterval >= 0 {
return errs.New("AS OF SYSTEM TIME interval must be a negative number")
}
if aost.DefaultInterval > -time.Microsecond {
return errs.New("AS OF SYSTEM TIME interval cannot be in nanoseconds")
}
}
return nil
}
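isValid rejects non-negative and sub-microsecond intervals because CRDB follower reads need a timestamp that is both in the past and coarse enough to be meaningful. The diff does not show where the interval is spliced into SQL, so the rendering below is an assumption for illustration, not the satellite's actual query code:

package example

import (
	"fmt"
	"time"
)

// withAOST appends an AS OF SYSTEM TIME clause to a simple single-table
// query. Illustrative only; real query construction may differ.
func withAOST(query string, interval time.Duration) string {
	if interval >= 0 {
		return query // feature disabled; read at the current timestamp
	}
	// A time.Duration such as -10 * time.Second prints as "-10s", which
	// CRDB accepts as a relative timestamp literal.
	return fmt.Sprintf("%s AS OF SYSTEM TIME '%s'", query, interval)
}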

View File

@ -14,7 +14,6 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
)
@ -69,9 +68,6 @@ type DB interface {
// UpdateCheckIn updates a single storagenode's check-in stats.
UpdateCheckIn(ctx context.Context, node NodeCheckInInfo, timestamp time.Time, config NodeSelectionConfig) (err error)
// UpdateAuditHistory updates a node's audit history with an online or offline audit.
UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config AuditHistoryConfig) (auditHistory *internalpb.AuditHistory, err error)
// AllPieceCounts returns a map of node IDs to piece counts from the db.
AllPieceCounts(ctx context.Context) (pieceCounts map[storj.NodeID]int, err error)
// UpdatePieceCounts sets the piece count field for the given node IDs.
@ -103,11 +99,18 @@ type DB interface {
SuspendNodeUnknownAudit(ctx context.Context, nodeID storj.NodeID, suspendedAt time.Time) (err error)
// UnsuspendNodeUnknownAudit unsuspends a storage node for unknown audits.
UnsuspendNodeUnknownAudit(ctx context.Context, nodeID storj.NodeID) (err error)
// SuspendNodeOfflineAudit suspends a storage node for offline audits.
SuspendNodeOfflineAudit(ctx context.Context, nodeID storj.NodeID, suspendedAt time.Time) (err error)
// UnsuspendNodeOfflineAudit unsuspends a storage node for offline audits.
UnsuspendNodeOfflineAudit(ctx context.Context, nodeID storj.NodeID) (err error)
// TestVetNode directly sets a node's vetted_at timestamp to make testing easier
TestVetNode(ctx context.Context, nodeID storj.NodeID) (vettedTime *time.Time, err error)
// TestUnvetNode directly sets a node's vetted_at timestamp to null to make testing easier
TestUnvetNode(ctx context.Context, nodeID storj.NodeID) (err error)
// AuditHistoryDB includes operations for interfacing with the audit history table.
AuditHistoryDB
}
// NodeCheckInInfo contains all the info that will be updated when a node checkins.
@ -132,19 +135,21 @@ type InfoResponse struct {
// FindStorageNodesRequest defines easy request parameters.
type FindStorageNodesRequest struct {
RequestedCount int
ExcludedIDs []storj.NodeID
MinimumVersion string // semver or empty
RequestedCount int
ExcludedIDs []storj.NodeID
MinimumVersion string // semver or empty
AsOfSystemTimeInterval time.Duration // only used for CRDB queries
}
// NodeCriteria are the requirements for selecting nodes.
type NodeCriteria struct {
FreeDisk int64
ExcludedIDs []storj.NodeID
ExcludedNetworks []string // the /24 subnet IPv4 or /64 subnet IPv6 for nodes
MinimumVersion string // semver or empty
OnlineWindow time.Duration
DistinctIP bool
FreeDisk int64
ExcludedIDs []storj.NodeID
ExcludedNetworks []string // the /24 subnet IPv4 or /64 subnet IPv6 for nodes
MinimumVersion string // semver or empty
OnlineWindow time.Duration
DistinctIP bool
AsOfSystemTimeInterval time.Duration // only used for CRDB queries
}
// AuditType is an enum representing the outcome of a particular audit reported to the overlay.
@ -277,7 +282,11 @@ type Service struct {
}
// NewService returns a new Service.
func NewService(log *zap.Logger, db DB, config Config) *Service {
func NewService(log *zap.Logger, db DB, config Config) (*Service, error) {
if err := config.Node.AsOfSystemTime.isValid(); err != nil {
return nil, err
}
return &Service{
log: log,
db: db,
@ -285,7 +294,7 @@ func NewService(log *zap.Logger, db DB, config Config) *Service {
SelectionCache: NewNodeSelectionCache(log, db,
config.NodeSelectionCache.Staleness, config.Node,
),
}
}, nil
}
// Close closes resources.
@ -324,6 +333,9 @@ func (service *Service) IsOnline(node *NodeDossier) bool {
// The main difference between this method and the normal FindStorageNodes is that here we avoid using the cache.
func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
if service.config.Node.AsOfSystemTime.Enabled && service.config.Node.AsOfSystemTime.DefaultInterval < 0 {
req.AsOfSystemTimeInterval = service.config.Node.AsOfSystemTime.DefaultInterval
}
return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node)
}
@ -333,6 +345,10 @@ func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req
// When the node selection from the cache fails, it falls back to the old implementation.
func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
if service.config.Node.AsOfSystemTime.Enabled && service.config.Node.AsOfSystemTime.DefaultInterval < 0 {
req.AsOfSystemTimeInterval = service.config.Node.AsOfSystemTime.DefaultInterval
}
if service.config.NodeSelectionCache.Disabled {
return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node)
}
@ -341,6 +357,7 @@ func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindS
if err != nil {
service.log.Warn("error selecting from node selection cache", zap.String("err", err.Error()))
}
if len(selectedNodes) < req.RequestedCount {
mon.Event("default_node_selection")
return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node)
@ -353,7 +370,6 @@ func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindS
// This does not use a cache.
func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
// TODO: add sanity limits to requested node count
// TODO: add sanity limits to excluded nodes
totalNeededNodes := req.RequestedCount
@ -375,12 +391,13 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
}
criteria := NodeCriteria{
FreeDisk: preferences.MinimumDiskSpace.Int64(),
ExcludedIDs: excludedIDs,
ExcludedNetworks: excludedNetworks,
MinimumVersion: preferences.MinimumVersion,
OnlineWindow: preferences.OnlineWindow,
DistinctIP: preferences.DistinctIP,
FreeDisk: preferences.MinimumDiskSpace.Int64(),
ExcludedIDs: excludedIDs,
ExcludedNetworks: excludedNetworks,
MinimumVersion: preferences.MinimumVersion,
OnlineWindow: preferences.OnlineWindow,
DistinctIP: preferences.DistinctIP,
AsOfSystemTimeInterval: req.AsOfSystemTimeInterval,
}
nodes, err = service.db.SelectStorageNodes(ctx, totalNeededNodes, newNodeCount, &criteria)
if err != nil {

View File

@ -73,8 +73,8 @@ func testCache(ctx context.Context, t *testing.T, store overlay.DB) {
nodeSelectionConfig := testNodeSelectionConfig(0, false)
serviceConfig := overlay.Config{Node: nodeSelectionConfig, UpdateStatsBatchSize: 100, AuditHistory: testAuditHistoryConfig()}
service := overlay.NewService(zaptest.NewLogger(t), store, serviceConfig)
service, err := overlay.NewService(zaptest.NewLogger(t), store, serviceConfig)
require.NoError(t, err)
d := overlay.NodeCheckInInfo{
Address: address,
LastIPPort: address.Address,
@ -463,11 +463,15 @@ func TestKnownReliable(t *testing.T) {
require.NoError(t, err)
require.False(t, service.IsOnline(node))
// Suspend storage node #2
// Suspend storage node #2 for unknown audits
err = satellite.DB.OverlayCache().SuspendNodeUnknownAudit(ctx, planet.StorageNodes[2].ID(), time.Now())
require.NoError(t, err)
// Check that only storage nodes #3 and #4 are reliable
// Suspend storage node #3 for offline audits
err = satellite.DB.OverlayCache().SuspendNodeOfflineAudit(ctx, planet.StorageNodes[3].ID(), time.Now())
require.NoError(t, err)
// Check that only storage node #4 is reliable
result, err := service.KnownReliable(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
planet.StorageNodes[1].ID(),
@ -476,11 +480,10 @@ func TestKnownReliable(t *testing.T) {
planet.StorageNodes[4].ID(),
})
require.NoError(t, err)
require.Len(t, result, 2)
require.Len(t, result, 1)
// Sort the storage nodes for predictable checks
expectedReliable := []storj.NodeURL{
planet.StorageNodes[3].NodeURL(),
planet.StorageNodes[4].NodeURL(),
}
sort.Slice(expectedReliable, func(i, j int) bool { return expectedReliable[i].ID.Less(expectedReliable[j].ID) })

View File

@ -40,7 +40,7 @@ func TestService_InvoiceElementsProcessing(t *testing.T) {
// pick a specific date so that it doesn't fail if it's the last day of the month
// keep month + 1 because user needs to be created before calculation
period := time.Date(2020, time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
period := time.Date(time.Now().Year(), time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
numberOfProjects := 19
// generate test data, each user has one project, one coupon and some credits
@ -110,7 +110,7 @@ func TestService_InvoiceUserWithManyProjects(t *testing.T) {
// pick a specific date so that it doesn't fail if it's the last day of the month
// keep month + 1 because user needs to be created before calculation
period := time.Date(2020, time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
period := time.Date(time.Now().Year(), time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
payments.Service.SetNow(func() time.Time {
return time.Date(period.Year(), period.Month()+1, 1, 0, 0, 0, 0, time.UTC)
@ -213,7 +213,7 @@ func TestService_InvoiceUserWithManyCoupons(t *testing.T) {
// pick a specific date so that it doesn't fail if it's the last day of the month
// keep month + 1 because user needs to be created before calculation
period := time.Date(2020, time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
period := time.Date(time.Now().Year(), time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
paymentsAPI.Service.SetNow(func() time.Time {
return time.Date(period.Year(), period.Month()+1, 1, 0, 0, 0, 0, time.UTC)
@ -327,7 +327,7 @@ func TestService_ApplyCouponsInTheOrder(t *testing.T) {
// pick a specific date so that it doesn't fail if it's the last day of the month
// keep month + 1 because user needs to be created before calculation
period := time.Date(2020, time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
period := time.Date(time.Now().Year(), time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
paymentsAPI.Service.SetNow(func() time.Time {
return time.Date(period.Year(), period.Month()+1, 1, 0, 0, 0, 0, time.UTC)
@ -491,7 +491,7 @@ func TestService_CouponStatus(t *testing.T) {
// pick a specific date so that it doesn't fail if it's the last day of the month
// keep month + 1 because user needs to be created before calculation
period := time.Date(2020, time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
period := time.Date(time.Now().Year(), time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
// generate egress
err = satellite.DB.Orders().UpdateBucketBandwidthSettle(ctx, project.ID, []byte("testbucket"),
@ -529,7 +529,7 @@ func TestService_ProjectsWithMembers(t *testing.T) {
// pick a specific date so that it doesn't fail if it's the last day of the month
// keep month + 1 because user needs to be created before calculation
period := time.Date(2020, time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
period := time.Date(time.Now().Year(), time.Now().Month()+1, 20, 0, 0, 0, 0, time.UTC)
numberOfUsers := 5
users := make([]*console.User, numberOfUsers)
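
Aside on the date fix above: the hardcoded year 2020 made these tests fail once the wall clock left that year, while time.Now().Year() combined with Month()+1 stays valid even in December, because time.Date normalizes an out-of-range month into the next year. A minimal, standalone sketch (illustrative values, not part of this change):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Assume "now" is 2020-12-31; Month()+1 yields 13, which Go
		// normalizes to January of the following year.
		now := time.Date(2020, time.December, 31, 0, 0, 0, 0, time.UTC)
		period := time.Date(now.Year(), now.Month()+1, 20, 0, 0, 0, 0, time.UTC)
		fmt.Println(period) // 2021-01-20 00:00:00 +0000 UTC
	}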

View File

@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/common/storj"
@ -21,7 +22,8 @@ func TestReliabilityCache_Concurrent(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
ocache := overlay.NewService(zap.NewNop(), fakeOverlayDB{}, overlay.Config{})
ocache, err := overlay.NewService(zap.NewNop(), fakeOverlayDB{}, overlay.Config{})
require.NoError(t, err)
rcache := NewReliabilityCache(ocache, time.Millisecond)
for i := 0; i < 10; i++ {

View File

@ -33,6 +33,12 @@ import "math"
// days, we divide the mean of the negative binomial distribution (X, above) by
// the number of nodes that we estimate will churn in one day.
func SegmentHealth(numHealthy, minPieces, totalNodes int, failureRate float64) float64 {
if totalNodes < minTotalNodes {
// this model gives wonky results when there are too few nodes; pretend
// there are more nodes than there really are so that the model gives
// sane repair priorities
totalNodes = minTotalNodes
}
churnPerRound := float64(totalNodes) * failureRate
if churnPerRound < minChurnPerRound {
// we artificially limit churnPerRound from going too low in cases
@ -52,4 +58,7 @@ func SegmentHealth(numHealthy, minPieces, totalNodes int, failureRate float64) f
return mean1 / churnPerRound
}
const minChurnPerRound = 1e-10
const (
minChurnPerRound = 1e-10
minTotalNodes = 100
)
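
A sketch of how the new floor changes the model's inputs; the mean1 computation is elided from this hunk, so only the clamping stage is shown, and the example failureRate values are placeholders:

	// clampedInputs mirrors the guards above: a hypothetical helper,
	// not part of this change.
	func clampedInputs(totalNodes int, failureRate float64) (int, float64) {
		const (
			minChurnPerRound = 1e-10
			minTotalNodes    = 100
		)
		if totalNodes < minTotalNodes {
			totalNodes = minTotalNodes // e.g. a 30-node network is modeled as 100
		}
		churnPerRound := float64(totalNodes) * failureRate
		if churnPerRound < minChurnPerRound {
			churnPerRound = minChurnPerRound // keep the later division well-defined
		}
		return totalNodes, churnPerRound
	}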

View File

@ -132,7 +132,11 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
}
{ // setup overlay
peer.Overlay = overlay.NewService(log.Named("overlay"), overlayCache, config.Overlay)
var err error
peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
peer.Services.Add(lifecycle.Item{
Name: "overlay",
Close: peer.Overlay.Close,

View File

@ -78,23 +78,32 @@ func addAudit(a *internalpb.AuditHistory, auditTime time.Time, online bool, conf
}
// UpdateAuditHistory updates a node's audit history with an online or offline audit.
func (cache *overlaycache) UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (history *internalpb.AuditHistory, err error) {
func (cache *overlaycache) UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (res *overlay.UpdateAuditHistoryResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = cache.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) (err error) {
_, err = tx.Tx.ExecContext(ctx, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
if err != nil {
return err
}
history, err = cache.updateAuditHistoryWithTx(ctx, tx, nodeID, auditTime, online, config)
res, err = cache.updateAuditHistoryWithTx(ctx, tx, nodeID, auditTime, online, config)
if err != nil {
return err
}
return nil
})
return history, err
return res, err
}
func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx.Tx, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (*internalpb.AuditHistory, error) {
func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx.Tx, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (res *overlay.UpdateAuditHistoryResponse, err error) {
defer mon.Task()(&ctx)(&err)
res = &overlay.UpdateAuditHistoryResponse{
NewScore: 1,
TrackingPeriodFull: false,
}
// get and deserialize node audit history
historyBytes := []byte{}
newEntry := false
@ -106,7 +115,7 @@ func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx
// set flag to true so we know to create rather than update later
newEntry = true
} else if err != nil {
return nil, Error.Wrap(err)
return res, Error.Wrap(err)
} else {
historyBytes = dbAuditHistory.History
}
@ -114,17 +123,17 @@ func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx
history := &internalpb.AuditHistory{}
err = pb.Unmarshal(historyBytes, history)
if err != nil {
return history, err
return res, err
}
err = addAudit(history, auditTime, online, config)
if err != nil {
return history, err
return res, err
}
historyBytes, err = pb.Marshal(history)
if err != nil {
return history, err
return res, err
}
// if the entry did not exist at the beginning, create a new one. Otherwise update
@ -134,7 +143,7 @@ func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx
dbx.AuditHistory_NodeId(nodeID.Bytes()),
dbx.AuditHistory_History(historyBytes),
)
return history, Error.Wrap(err)
return res, Error.Wrap(err)
}
_, err = tx.Update_AuditHistory_By_NodeId(
@ -145,5 +154,49 @@ func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx
},
)
return history, Error.Wrap(err)
windowsPerTrackingPeriod := int(config.TrackingPeriod.Seconds() / config.WindowSize.Seconds())
res.TrackingPeriodFull = len(history.Windows)-1 >= windowsPerTrackingPeriod
res.NewScore = history.Score
return res, Error.Wrap(err)
}
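
Worked numbers for the TrackingPeriodFull check, under assumed config values (12h windows over a 30-day tracking period; the deployed settings may differ). The newest window is still accumulating audits, hence the len-1:

	windowSize := 12 * time.Hour
	trackingPeriod := 30 * 24 * time.Hour
	windowsPerTrackingPeriod := int(trackingPeriod.Seconds() / windowSize.Seconds()) // 60
	numWindows := 61 // recorded windows, including the one still in progress
	full := numWindows-1 >= windowsPerTrackingPeriod // true: 60 complete windows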
// GetAuditHistory gets a node's audit history.
func (cache *overlaycache) GetAuditHistory(ctx context.Context, nodeID storj.NodeID) (auditHistory *overlay.AuditHistory, err error) {
defer mon.Task()(&ctx)(&err)
dbAuditHistory, err := cache.db.Get_AuditHistory_By_NodeId(
ctx,
dbx.AuditHistory_NodeId(nodeID.Bytes()),
)
if err != nil {
if errs.Is(err, sql.ErrNoRows) {
return nil, overlay.ErrNodeNotFound.New("no audit history for node")
}
return nil, err
}
history, err := auditHistoryFromPB(dbAuditHistory.History)
if err != nil {
return nil, err
}
return history, nil
}
func auditHistoryFromPB(historyBytes []byte) (auditHistory *overlay.AuditHistory, err error) {
historyPB := &internalpb.AuditHistory{}
err = pb.Unmarshal(historyBytes, historyPB)
if err != nil {
return nil, err
}
history := &overlay.AuditHistory{
Score: historyPB.Score,
Windows: make([]*overlay.AuditWindow, len(historyPB.Windows)),
}
for i, window := range historyPB.Windows {
history.Windows[i] = &overlay.AuditWindow{
TotalCount: window.TotalCount,
OnlineCount: window.OnlineCount,
WindowStart: window.WindowStart,
}
}
return history, nil
}

View File

@ -6,6 +6,7 @@ package satellitedb
import (
"context"
"sync"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
@ -164,6 +165,17 @@ func (dbc *satelliteDBCollection) getByName(name string) *satelliteDB {
return dbc.dbs[""]
}
// AsOfSystemTimeClause returns the "AS OF SYSTEM TIME" clause if the DB implementation
// is CockroachDB and the interval is less than 0.
func (db *satelliteDB) AsOfSystemTimeClause(interval time.Duration) (asOf string) {
if db.implementation == dbutil.Cockroach && interval < 0 {
asOf = " AS OF SYSTEM TIME '" + interval.String() + "' "
}
return asOf
}
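
A usage sketch for the new helper (the -10s interval is an arbitrary example); on PostgreSQL the clause is empty and the query is unchanged:

	asOf := db.AsOfSystemTimeClause(-10 * time.Second)
	query := `SELECT id FROM nodes ` + asOf + `WHERE disqualified IS NULL`
	// CockroachDB: SELECT id FROM nodes  AS OF SYSTEM TIME '-10s' WHERE disqualified IS NULL
	// PostgreSQL:  SELECT id FROM nodes WHERE disqualified IS NULL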
// TestDBAccess for raw database access,
// should not be used outside of migration tests.
func (db *satelliteDB) TestDBAccess() *dbx.DB { return db.DB }

View File

@ -1102,29 +1102,6 @@ read one (
where graceful_exit_transfer_queue.piece_num = ?
)
//--- downtime tracking ---//
model nodes_offline_time (
key node_id tracked_at
index (
fields node_id
)
field node_id blob
field tracked_at timestamp
field seconds int
)
create nodes_offline_time ()
read all (
select nodes_offline_time
where nodes_offline_time.node_id = ?
where nodes_offline_time.tracked_at > ?
where nodes_offline_time.tracked_at <= ?
)
//--- satellite payments ---//
model stripe_customer (

View File

@ -492,12 +492,6 @@ CREATE TABLE node_api_versions (
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
@ -772,7 +766,6 @@ CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_h
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
@ -1051,12 +1044,6 @@ CREATE TABLE node_api_versions (
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
@ -1331,7 +1318,6 @@ CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_h
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
@ -4418,74 +4404,6 @@ func (f NodeApiVersion_UpdatedAt_Field) value() interface{} {
func (NodeApiVersion_UpdatedAt_Field) _Column() string { return "updated_at" }
type NodesOfflineTime struct {
NodeId []byte
TrackedAt time.Time
Seconds int
}
func (NodesOfflineTime) _Table() string { return "nodes_offline_times" }
type NodesOfflineTime_Update_Fields struct {
}
type NodesOfflineTime_NodeId_Field struct {
_set bool
_null bool
_value []byte
}
func NodesOfflineTime_NodeId(v []byte) NodesOfflineTime_NodeId_Field {
return NodesOfflineTime_NodeId_Field{_set: true, _value: v}
}
func (f NodesOfflineTime_NodeId_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (NodesOfflineTime_NodeId_Field) _Column() string { return "node_id" }
type NodesOfflineTime_TrackedAt_Field struct {
_set bool
_null bool
_value time.Time
}
func NodesOfflineTime_TrackedAt(v time.Time) NodesOfflineTime_TrackedAt_Field {
return NodesOfflineTime_TrackedAt_Field{_set: true, _value: v}
}
func (f NodesOfflineTime_TrackedAt_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (NodesOfflineTime_TrackedAt_Field) _Column() string { return "tracked_at" }
type NodesOfflineTime_Seconds_Field struct {
_set bool
_null bool
_value int
}
func NodesOfflineTime_Seconds(v int) NodesOfflineTime_Seconds_Field {
return NodesOfflineTime_Seconds_Field{_set: true, _value: v}
}
func (f NodesOfflineTime_Seconds_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (NodesOfflineTime_Seconds_Field) _Column() string { return "seconds" }
type Offer struct {
Id int
Name string
@ -10272,33 +10190,6 @@ func (obj *pgxImpl) CreateNoReturn_GracefulExitTransferQueue(ctx context.Context
}
func (obj *pgxImpl) Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error) {
defer mon.Task()(&ctx)(&err)
__node_id_val := nodes_offline_time_node_id.value()
__tracked_at_val := nodes_offline_time_tracked_at.value()
__seconds_val := nodes_offline_time_seconds.value()
var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes_offline_times ( node_id, tracked_at, seconds ) VALUES ( ?, ?, ? ) RETURNING nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds")
var __values []interface{}
__values = append(__values, __node_id_val, __tracked_at_val, __seconds_val)
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
nodes_offline_time = &NodesOfflineTime{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
if err != nil {
return nil, obj.makeErr(err)
}
return nodes_offline_time, nil
}
func (obj *pgxImpl) Create_StripeCustomer(ctx context.Context,
stripe_customer_user_id StripeCustomer_UserId_Field,
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
@ -12846,53 +12737,6 @@ func (obj *pgxImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_And_PieceNu
}
func (obj *pgxImpl) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds FROM nodes_offline_times WHERE nodes_offline_times.node_id = ? AND nodes_offline_times.tracked_at > ? AND nodes_offline_times.tracked_at <= ?")
var __values []interface{}
__values = append(__values, nodes_offline_time_node_id.value(), nodes_offline_time_tracked_at_greater.value(), nodes_offline_time_tracked_at_less_or_equal.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
for {
rows, err = func() (rows []*NodesOfflineTime, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()
for __rows.Next() {
nodes_offline_time := &NodesOfflineTime{}
err = __rows.Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
if err != nil {
return nil, err
}
rows = append(rows, nodes_offline_time)
}
if err := __rows.Err(); err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}
}
func (obj *pgxImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
stripe_customer_user_id StripeCustomer_UserId_Field) (
row *CustomerId_Row, err error) {
@ -15694,16 +15538,6 @@ func (obj *pgxImpl) deleteAll(ctx context.Context) (count int64, err error) {
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
}
count += __count
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM nodes_offline_times;")
if err != nil {
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
@ -17109,33 +16943,6 @@ func (obj *pgxcockroachImpl) CreateNoReturn_GracefulExitTransferQueue(ctx contex
}
func (obj *pgxcockroachImpl) Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error) {
defer mon.Task()(&ctx)(&err)
__node_id_val := nodes_offline_time_node_id.value()
__tracked_at_val := nodes_offline_time_tracked_at.value()
__seconds_val := nodes_offline_time_seconds.value()
var __embed_stmt = __sqlbundle_Literal("INSERT INTO nodes_offline_times ( node_id, tracked_at, seconds ) VALUES ( ?, ?, ? ) RETURNING nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds")
var __values []interface{}
__values = append(__values, __node_id_val, __tracked_at_val, __seconds_val)
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
nodes_offline_time = &NodesOfflineTime{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
if err != nil {
return nil, obj.makeErr(err)
}
return nodes_offline_time, nil
}
func (obj *pgxcockroachImpl) Create_StripeCustomer(ctx context.Context,
stripe_customer_user_id StripeCustomer_UserId_Field,
stripe_customer_customer_id StripeCustomer_CustomerId_Field) (
@ -19683,53 +19490,6 @@ func (obj *pgxcockroachImpl) Get_GracefulExitTransferQueue_By_NodeId_And_Path_An
}
func (obj *pgxcockroachImpl) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes_offline_times.node_id, nodes_offline_times.tracked_at, nodes_offline_times.seconds FROM nodes_offline_times WHERE nodes_offline_times.node_id = ? AND nodes_offline_times.tracked_at > ? AND nodes_offline_times.tracked_at <= ?")
var __values []interface{}
__values = append(__values, nodes_offline_time_node_id.value(), nodes_offline_time_tracked_at_greater.value(), nodes_offline_time_tracked_at_less_or_equal.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
for {
rows, err = func() (rows []*NodesOfflineTime, err error) {
__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
if err != nil {
return nil, err
}
defer __rows.Close()
for __rows.Next() {
nodes_offline_time := &NodesOfflineTime{}
err = __rows.Scan(&nodes_offline_time.NodeId, &nodes_offline_time.TrackedAt, &nodes_offline_time.Seconds)
if err != nil {
return nil, err
}
rows = append(rows, nodes_offline_time)
}
if err := __rows.Err(); err != nil {
return nil, err
}
return rows, nil
}()
if err != nil {
if obj.shouldRetry(err) {
continue
}
return nil, obj.makeErr(err)
}
return rows, nil
}
}
func (obj *pgxcockroachImpl) Get_StripeCustomer_CustomerId_By_UserId(ctx context.Context,
stripe_customer_user_id StripeCustomer_UserId_Field) (
row *CustomerId_Row, err error) {
@ -22531,16 +22291,6 @@ func (obj *pgxcockroachImpl) deleteAll(ctx context.Context) (count int64, err er
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
}
count += __count
__res, err = obj.driver.ExecContext(ctx, "DELETE FROM nodes_offline_times;")
if err != nil {
return 0, obj.makeErr(err)
}
__count, err = __res.RowsAffected()
if err != nil {
return 0, obj.makeErr(err)
@ -22854,18 +22604,6 @@ func (rx *Rx) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.C
return tx.All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx)
}
func (rx *Rx) All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx, nodes_offline_time_node_id, nodes_offline_time_tracked_at_greater, nodes_offline_time_tracked_at_less_or_equal)
}
func (rx *Rx) All_Offer_OrderBy_Asc_Id(ctx context.Context) (
rows []*Offer, err error) {
var tx *Tx
@ -23283,19 +23021,6 @@ func (rx *Rx) Create_CouponUsage(ctx context.Context,
}
func (rx *Rx) Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Create_NodesOfflineTime(ctx, nodes_offline_time_node_id, nodes_offline_time_tracked_at, nodes_offline_time_seconds)
}
func (rx *Rx) Create_Offer(ctx context.Context,
offer_name Offer_Name_Field,
offer_description Offer_Description_Field,
@ -24637,12 +24362,6 @@ type Methods interface {
All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
rows []*Id_PieceCount_Row, err error)
All_NodesOfflineTime_By_NodeId_And_TrackedAt_Greater_And_TrackedAt_LessOrEqual(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at_greater NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_tracked_at_less_or_equal NodesOfflineTime_TrackedAt_Field) (
rows []*NodesOfflineTime, err error)
All_Offer_OrderBy_Asc_Id(ctx context.Context) (
rows []*Offer, err error)
@ -24855,12 +24574,6 @@ type Methods interface {
coupon_usage_period CouponUsage_Period_Field) (
coupon_usage *CouponUsage, err error)
Create_NodesOfflineTime(ctx context.Context,
nodes_offline_time_node_id NodesOfflineTime_NodeId_Field,
nodes_offline_time_tracked_at NodesOfflineTime_TrackedAt_Field,
nodes_offline_time_seconds NodesOfflineTime_Seconds_Field) (
nodes_offline_time *NodesOfflineTime, err error)
Create_Offer(ctx context.Context,
offer_name Offer_Name_Field,
offer_description Offer_Description_Field,

View File

@ -172,12 +172,6 @@ CREATE TABLE node_api_versions (
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
@ -452,7 +446,6 @@ CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_h
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );

View File

@ -172,12 +172,6 @@ CREATE TABLE node_api_versions (
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
@ -452,7 +446,6 @@ CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_h
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );

View File

@ -1158,6 +1158,15 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
`CREATE INDEX IF NOT EXISTS nodes_dis_unk_exit_fin_last_success_index ON nodes(disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success);`,
},
},
{
DB: &db.migrationDB,
Description: "drop node_offline_times table",
Version: 138,
SeparateTx: true,
Action: migrate.SQL{
`DROP TABLE nodes_offline_times;`,
},
},
},
}
}

View File

@ -90,10 +90,7 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, totalNeededNo
return nodes, nil
}
func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputableNodeCount, newNodeCount int,
criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID,
excludedNetworks []string) (reputableNodes, newNodes []*overlay.SelectedNode, err error) {
func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputableNodeCount, newNodeCount int, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID, excludedNetworks []string) (reputableNodes, newNodes []*overlay.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
newNodesCondition, err := nodeSelectionCondition(ctx, criteria, excludedIDs, excludedNetworks, true)
@ -107,33 +104,39 @@ func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputable
var reputableNodeQuery, newNodeQuery partialQuery
asOf := cache.db.AsOfSystemTimeClause(criteria.AsOfSystemTimeInterval)
// Note: the true/false at the end of each selection string indicates if the selection is for new nodes or not.
// Later, the flag allows us to distinguish if a node is new when scanning the db rows.
if !criteria.DistinctIP {
reputableNodeQuery = partialQuery{
selection: `SELECT last_net, id, address, last_ip_port, false FROM nodes`,
condition: reputableNodesCondition,
limit: reputableNodeCount,
selection: `SELECT last_net, id, address, last_ip_port, false FROM nodes ` + asOf,
condition: reputableNodesCondition,
limit: reputableNodeCount,
aostClause: asOf,
}
newNodeQuery = partialQuery{
selection: `SELECT last_net, id, address, last_ip_port, true FROM nodes`,
condition: newNodesCondition,
limit: newNodeCount,
selection: `SELECT last_net, id, address, last_ip_port, true FROM nodes ` + asOf,
condition: newNodesCondition,
limit: newNodeCount,
aostClause: asOf,
}
} else {
reputableNodeQuery = partialQuery{
selection: `SELECT DISTINCT ON (last_net) last_net, id, address, last_ip_port, false FROM nodes`,
condition: reputableNodesCondition,
distinct: true,
limit: reputableNodeCount,
orderBy: "last_net",
selection: `SELECT DISTINCT ON (last_net) last_net, id, address, last_ip_port, false FROM nodes ` + asOf,
condition: reputableNodesCondition,
distinct: true,
limit: reputableNodeCount,
orderBy: "last_net",
aostClause: asOf,
}
newNodeQuery = partialQuery{
selection: `SELECT DISTINCT ON (last_net) last_net, id, address, last_ip_port, true FROM nodes`,
condition: newNodesCondition,
distinct: true,
limit: newNodeCount,
orderBy: "last_net",
selection: `SELECT DISTINCT ON (last_net) last_net, id, address, last_ip_port, true FROM nodes ` + asOf,
condition: newNodesCondition,
distinct: true,
limit: newNodeCount,
orderBy: "last_net",
aostClause: asOf,
}
}
@ -174,12 +177,11 @@ func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputable
}
// nodeSelectionCondition creates a query condition, with bound arguments, corresponding to the given criteria and exclusions.
func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID,
excludedNetworks []string, isNewNodeQuery bool) (condition, error) {
func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID, excludedNetworks []string, isNewNodeQuery bool) (condition, error) {
var conds conditions
conds.add(`disqualified IS NULL`)
conds.add(`unknown_audit_suspended IS NULL`)
conds.add(`offline_suspended IS NULL`)
conds.add(`exit_initiated_at IS NULL`)
conds.add(`type = ?`, int(pb.NodeType_STORAGE))
@ -236,11 +238,12 @@ func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria,
// SELECT * FROM ($selection WHERE $condition ORDER BY $orderBy, RANDOM()) filtered ORDER BY RANDOM() LIMIT $limit
//
type partialQuery struct {
selection string
condition condition
distinct bool
orderBy string
limit int
selection string
condition condition
distinct bool
orderBy string
limit int
aostClause string
}
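
With the new field populated, the non-distinct form of the query shape in the comment above renders roughly as follows (a sketch of the assembled string, not verbatim generated SQL; '-10s' is an example interval):

	// SELECT * FROM (
	//     SELECT last_net, id, address, last_ip_port, false
	//     FROM nodes AS OF SYSTEM TIME '-10s'
	//     WHERE $condition
	// ) filtered AS OF SYSTEM TIME '-10s' ORDER BY RANDOM() LIMIT ?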
// isEmpty returns whether the result for the query is definitely empty.
@ -271,7 +274,7 @@ func (partial partialQuery) asQuery() query {
fmt.Fprint(&q, " LIMIT ? ")
args = append(args, partial.limit)
} else {
fmt.Fprint(&q, ") filtered ORDER BY RANDOM() LIMIT ?")
fmt.Fprint(&q, ") filtered "+partial.aostClause+" ORDER BY RANDOM() LIMIT ?")
args = append(args, partial.limit)
}

View File

@ -22,7 +22,6 @@ import (
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/dbx"
)
@ -56,11 +55,14 @@ func (cache *overlaycache) SelectAllStorageNodesUpload(ctx context.Context, sele
func (cache *overlaycache) selectAllStorageNodesUpload(ctx context.Context, selectionCfg overlay.NodeSelectionConfig) (reputable, new []*overlay.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
asOf := cache.db.AsOfSystemTimeClause(selectionCfg.AsOfSystemTime.DefaultInterval)
query := `
SELECT id, address, last_net, last_ip_port, vetted_at
FROM nodes
FROM nodes ` + asOf + `
WHERE disqualified IS NULL
AND unknown_audit_suspended IS NULL
AND offline_suspended IS NULL
AND exit_initiated_at IS NULL
AND type = $1
AND free_disk >= $2
@ -253,10 +255,12 @@ func (cache *overlaycache) knownOffline(ctx context.Context, criteria *overlay.N
return nil, Error.New("no ids provided")
}
asOf := cache.db.AsOfSystemTimeClause(criteria.AsOfSystemTimeInterval)
// get offline nodes
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT id FROM nodes
SELECT id FROM nodes `+asOf+`
WHERE id = any($1::bytea[])
AND last_contact_success < $2
`), pgutil.NodeIDArray(nodeIds), time.Now().Add(-criteria.OnlineWindow),
@ -300,13 +304,16 @@ func (cache *overlaycache) knownUnreliableOrOffline(ctx context.Context, criteri
return nil, Error.New("no ids provided")
}
asOf := cache.db.AsOfSystemTimeClause(criteria.AsOfSystemTimeInterval)
// get reliable and online nodes
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT id FROM nodes
SELECT id FROM nodes `+asOf+`
WHERE id = any($1::bytea[])
AND disqualified IS NULL
AND unknown_audit_suspended IS NULL
AND offline_suspended IS NULL
AND exit_finished_at IS NULL
AND last_contact_success > $2
`), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-criteria.OnlineWindow),
@ -363,6 +370,7 @@ func (cache *overlaycache) knownReliable(ctx context.Context, onlineWindow time.
WHERE id = any($1::bytea[])
AND disqualified IS NULL
AND unknown_audit_suspended IS NULL
AND offline_suspended IS NULL
AND exit_finished_at IS NULL
AND last_contact_success > $2
`), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow),
@ -404,11 +412,14 @@ func (cache *overlaycache) Reliable(ctx context.Context, criteria *overlay.NodeC
}
func (cache *overlaycache) reliable(ctx context.Context, criteria *overlay.NodeCriteria) (nodes storj.NodeIDList, err error) {
asOf := cache.db.AsOfSystemTimeClause(criteria.AsOfSystemTimeInterval)
// get reliable and online nodes
rows, err := cache.db.Query(ctx, cache.db.Rebind(`
SELECT id FROM nodes
SELECT id FROM nodes `+asOf+`
WHERE disqualified IS NULL
AND unknown_audit_suspended IS NULL
AND offline_suspended IS NULL
AND exit_finished_at IS NULL
AND last_contact_success > ?
`), time.Now().Add(-criteria.OnlineWindow))
@ -473,13 +484,13 @@ func (cache *overlaycache) BatchUpdateStats(ctx context.Context, updateRequests
}
isUp := updateReq.AuditOutcome != overlay.AuditOffline
auditHistory, err := cache.updateAuditHistoryWithTx(ctx, tx, updateReq.NodeID, now, isUp, updateReq.AuditHistory)
auditHistoryResponse, err := cache.updateAuditHistoryWithTx(ctx, tx, updateReq.NodeID, now, isUp, updateReq.AuditHistory)
if err != nil {
doAppendAll = false
return err
}
updateNodeStats := cache.populateUpdateNodeStats(dbNode, updateReq, auditHistory, now)
updateNodeStats := cache.populateUpdateNodeStats(dbNode, updateReq, auditHistoryResponse, now)
sql := buildUpdateStatement(updateNodeStats)
@ -552,12 +563,12 @@ func (cache *overlaycache) UpdateStats(ctx context.Context, updateReq *overlay.U
}
isUp := updateReq.AuditOutcome != overlay.AuditOffline
auditHistory, err := cache.updateAuditHistoryWithTx(ctx, tx, updateReq.NodeID, now, isUp, updateReq.AuditHistory)
auditHistoryResponse, err := cache.updateAuditHistoryWithTx(ctx, tx, updateReq.NodeID, now, isUp, updateReq.AuditHistory)
if err != nil {
return err
}
updateFields := cache.populateUpdateFields(dbNode, updateReq, auditHistory, now)
updateFields := cache.populateUpdateFields(dbNode, updateReq, auditHistoryResponse, now)
dbNode, err = tx.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
if err != nil {
return err
@ -738,6 +749,38 @@ func (cache *overlaycache) UnsuspendNodeUnknownAudit(ctx context.Context, nodeID
return nil
}
// SuspendNodeOfflineAudit suspends a storage node for offline audits.
func (cache *overlaycache) SuspendNodeOfflineAudit(ctx context.Context, nodeID storj.NodeID, suspendedAt time.Time) (err error) {
defer mon.Task()(&ctx)(&err)
updateFields := dbx.Node_Update_Fields{}
updateFields.OfflineSuspended = dbx.Node_OfflineSuspended(suspendedAt.UTC())
dbNode, err := cache.db.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
if err != nil {
return err
}
if dbNode == nil {
return errs.New("unable to get node by ID: %v", nodeID)
}
return nil
}
// UnsuspendNodeOfflineAudit unsuspends a storage node for offline audits.
func (cache *overlaycache) UnsuspendNodeOfflineAudit(ctx context.Context, nodeID storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)
updateFields := dbx.Node_Update_Fields{}
updateFields.OfflineSuspended = dbx.Node_OfflineSuspended_Null()
dbNode, err := cache.db.Update_Node_By_Id(ctx, dbx.Node_Id(nodeID.Bytes()), updateFields)
if err != nil {
return err
}
if dbNode == nil {
return errs.New("unable to get node by ID: %v", nodeID)
}
return nil
}
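
These two setters mirror the existing unknown-audit pair; a usage sketch in the style of the KnownReliable test earlier in this diff (nodeID and the test harness are assumed context):

	err := satellite.DB.OverlayCache().SuspendNodeOfflineAudit(ctx, nodeID, time.Now())
	require.NoError(t, err)
	// ... the node now fails the offline_suspended IS NULL filters above ...
	err = satellite.DB.OverlayCache().UnsuspendNodeOfflineAudit(ctx, nodeID)
	require.NoError(t, err)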
// AllPieceCounts returns a map of node IDs to piece counts from the db.
// NB: a valid, partial piece map can be returned even if node ID parsing error(s) are returned.
func (cache *overlaycache) AllPieceCounts(ctx context.Context) (_ map[storj.NodeID]int, err error) {
@ -1360,7 +1403,7 @@ type updateNodeStats struct {
OnlineScore float64Field
}
func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistory *internalpb.AuditHistory, now time.Time) updateNodeStats {
func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistoryResponse *overlay.UpdateAuditHistoryResponse, now time.Time) updateNodeStats {
// there are three audit outcomes: success, failure, and unknown
// if a node fails enough audits, it gets disqualified
// if a node gets enough "unknown" audits, it gets put into suspension
@ -1371,7 +1414,6 @@ func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *
unknownAuditBeta := dbNode.UnknownAuditReputationBeta
totalAuditCount := dbNode.TotalAuditCount
vettedAt := dbNode.VettedAt
auditOnlineScore := auditHistory.Score
var updatedTotalAuditCount int64
@ -1424,7 +1466,7 @@ func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *
mon.FloatVal("audit_reputation_beta").Observe(auditBeta) //mon:locked
mon.FloatVal("unknown_audit_reputation_alpha").Observe(unknownAuditAlpha) //mon:locked
mon.FloatVal("unknown_audit_reputation_beta").Observe(unknownAuditBeta) //mon:locked
mon.FloatVal("audit_online_score").Observe(auditOnlineScore) //mon:locked
mon.FloatVal("audit_online_score").Observe(auditHistoryResponse.NewScore) //mon:locked
totalUptimeCount := dbNode.TotalUptimeCount
isUp := updateReq.AuditOutcome != overlay.AuditOffline
@ -1500,17 +1542,15 @@ func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *
// Updating node stats always exits it from containment mode
updateFields.Contained = boolField{set: true, value: false}
windowsPerTrackingPeriod := int(updateReq.AuditHistory.TrackingPeriod.Seconds() / updateReq.AuditHistory.WindowSize.Seconds())
// only penalize node if online score is below threshold and
// if it has enough completed windows to fill a tracking period
penalizeOfflineNode := false
if auditOnlineScore < updateReq.AuditHistory.OfflineThreshold && len(auditHistory.Windows)-1 >= windowsPerTrackingPeriod {
if auditHistoryResponse.NewScore < updateReq.AuditHistory.OfflineThreshold && auditHistoryResponse.TrackingPeriodFull {
penalizeOfflineNode = true
}
// always update online score
updateFields.OnlineScore = float64Field{set: true, value: auditOnlineScore}
updateFields.OnlineScore = float64Field{set: true, value: auditHistoryResponse.NewScore}
// Suspension and disqualification for offline nodes
if dbNode.UnderReview != nil {
@ -1548,9 +1588,9 @@ func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *
return updateFields
}
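
A worked example of the new penalty gate, with an assumed OfflineThreshold of 0.6 (illustrative, not the production value): a node scoring 0.5 is penalized only once its tracking period is full:

	resp := &overlay.UpdateAuditHistoryResponse{NewScore: 0.5, TrackingPeriodFull: true}
	penalize := resp.NewScore < 0.6 && resp.TrackingPeriodFull // true
	_ = penalize // with TrackingPeriodFull == false, the node is not yet penalized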
func (cache *overlaycache) populateUpdateFields(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistory *internalpb.AuditHistory, now time.Time) dbx.Node_Update_Fields {
func (cache *overlaycache) populateUpdateFields(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistoryResponse *overlay.UpdateAuditHistoryResponse, now time.Time) dbx.Node_Update_Fields {
update := cache.populateUpdateNodeStats(dbNode, updateReq, auditHistory, now)
update := cache.populateUpdateNodeStats(dbNode, updateReq, auditHistoryResponse, now)
updateFields := dbx.Node_Update_Fields{}
if update.VettedAt.set {
updateFields.VettedAt = dbx.Node_VettedAt(update.VettedAt.value)

View File

@ -0,0 +1,567 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
node_id bytea NOT NULL,
history bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
storage_node_id bytea NOT NULL,
serial_number bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL DEFAULT 0,
pieces_failed bigint NOT NULL DEFAULT 0,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL DEFAULT 0,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
api_version integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL DEFAULT 0,
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
serial_number bytea NOT NULL,
action integer NOT NULL,
settled bigint NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint,
bandwidth_limit bigint,
rate_limit integer,
max_buckets integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
project_id bytea NOT NULL,
interval_month date NOT NULL,
egress_allocated bigint NOT NULL,
PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
expires_at timestamp with time zone NOT NULL,
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
action integer NOT NULL,
serial_number bytea NOT NULL,
settled bigint NOT NULL,
observed_at timestamp with time zone NOT NULL,
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE revocations (
revoked bytea NOT NULL,
api_key_id bytea NOT NULL,
PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text NOT NULL,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies (project_id);
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes(disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success);
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);

View File

@ -451,6 +451,12 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# how stale the node selection cache can be
# overlay.node-selection-cache.staleness: 3m0s
# default duration for AS OF SYSTEM TIME
# overlay.node.as-of-system-time.default-interval: -10s
# enables the use of the AS OF SYSTEM TIME feature in CRDB
# overlay.node.as-of-system-time.enabled: true
# the number of times a node has been audited to not be considered a New Node
# overlay.node.audit-count: 100
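
For context: AS OF SYSTEM TIME lets CockroachDB answer a read from a slightly stale snapshot, which reduces contention with concurrent writes at the cost of data up to the configured interval old. A minimal sketch of how the configured interval could be applied to a query — the table name and helper are illustrative, not the actual overlay schema or API:

package main

import (
	"database/sql"
	"fmt"
	"time"

	_ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol
)

// countNodesAsOf mirrors the overlay.node.as-of-system-time settings above:
// when enabled, the read runs against a snapshot from `interval` ago
// (e.g. -10s). "nodes_example" is a hypothetical table, not the real schema.
func countNodesAsOf(db *sql.DB, enabled bool, interval time.Duration) (count int, err error) {
	query := "SELECT count(*) FROM nodes_example"
	if enabled {
		// Safe to interpolate only because interval comes from trusted
		// configuration, never from user input.
		query += fmt.Sprintf(" AS OF SYSTEM TIME '%s'", interval)
	}
	err = db.QueryRow(query).Scan(&count)
	return count, err
}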

View File

@ -2,10 +2,10 @@
# This file contains the second part of Stage 2 for the rolling upgrade test.
# Description of file functionality:
# * Upload an inline, remote, and multisegment file to the network using the master uplink and the new satellite api.
# * Upload an inline, remote, and multisegment file to the network using the master uplink and the old satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and new satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and old satellite api.
# * Upload an inline, remote, and multisegment file to the network using the main uplink and the new satellite api.
# * Upload an inline, remote, and multisegment file to the network using the main uplink and the old satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and new satellite api.
# * Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and old satellite api.
set -ueo pipefail

View File

@ -2,8 +2,8 @@
# This file contains the first part of Stage 2 for the rolling upgrade test.
# Description of file functionality:
# * Download the inline, remote, and multisegment file from the network using the master uplink and the new satellite api.
# * Download the inline, remote, and multisegment file from the network using the master uplink and the old satellite api.
# * Download the inline, remote, and multisegment file from the network using the main uplink and the new satellite api.
# * Download the inline, remote, and multisegment file from the network using the main uplink and the old satellite api.
set -ueo pipefail

View File

@ -24,6 +24,6 @@ until $(docker logs postgres-$BUILD_NUMBER | grep "database system is ready to a
done
docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj
# fetch the remote master branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master
# fetch the remote main branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main
$SCRIPTDIR/test-sim-rolling-upgrade.sh

View File

@ -9,15 +9,15 @@
# * (test-versions.sh upload) - Upload an inline, remote, and multisegment file to the network with the selected uplink.
# Stage 2:
# * Upgrade the satellite to current commit. Run an "old" satellite api server on the latest point release (port 30000).
# * Keep half of the storagenodes on the latest point release. Upgrade the other half to master.
# * Keep half of the storagenodes on the latest point release. Upgrade the other half to main.
# * Point half of the storagenodes to the old satellite api (port 30000). Keep the other half on the new satellite api (port 10000).
# * Check out the master version of the uplink.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the master uplink and the new satellite api.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the master uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the master uplink and the new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the master uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the master uplink and old satellite api.
# * Check out the main version of the uplink.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the main uplink and the new satellite api.
# * (test-rolling-upgrade.sh) - Download the inline, remote, and multisegment file from the network using the main uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the main uplink and the new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Upload an inline, remote, and multisegment file to the network using the main uplink and the old satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and new satellite api.
# * (test-rolling-upgrade-final-upload.sh) - Download the six inline, remote, and multisegment files from the previous two steps using the main uplink and old satellite api.
set -ueo pipefail
set +x
@ -46,11 +46,11 @@ populate_sno_versions(){
# set peers' versions
# in stage 1: satellite, uplink, and storagenode use latest release version
# in stage 2: satellite core uses the latest release version and satellite api uses master. Storage nodes are split in half: half on the latest release version and half on master. Uplink uses the latest release version plus master
# in stage 2: satellite core uses the latest release version and satellite api uses main. Storage nodes are split in half: half on the latest release version and half on main. Uplink uses the latest release version plus main
BRANCH_NAME=${BRANCH_NAME:-""}
git fetch --tags
# if it's running on a release branch, we will set the stage 1 version to be the latest previous major release
# if it's running on master, we will set the stage 1 version to be the current release version
# if it's running on main, we will set the stage 1 version to be the current release version
current_commit=$(git rev-parse HEAD)
stage1_release_version=$(git tag -l --sort -version:refname | grep -v rc | head -1)
if [[ $BRANCH_NAME = v* ]]; then
@ -277,7 +277,7 @@ old_api_pid=$!
# Wait until the old satellite api is responding to requests, to ensure it is up before the migration runs.
storj-sim tool wait-for --retry 50 --interval 100ms "127.0.0.1:30000"
# Downloading every file uploaded in stage 1 from the network using the latest commit from the master branch for each uplink version
# Downloading every file uploaded in stage 1 from the network using the latest commit from the main branch for each uplink version
for ul_version in ${stage2_uplink_versions}; do
if [ "$ul_version" = "v1.6.3" ]; then
# TODO: skip v1.6.3 uplink since it doesn't support changing imported access satellite address

View File

@ -24,6 +24,6 @@ until $(docker logs postgres-$BUILD_NUMBER | grep "database system is ready to a
done
docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj
# fetch the remote master branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/master:refs/remotes/origin/master
# fetch the remote main branch
git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main
$SCRIPTDIR/test-sim-versions.sh

View File

@ -25,7 +25,7 @@ RUN_TYPE=${RUN_TYPE:-"jenkins"}
# set peers' versions
# in stage 1: satellite and storagenode use the latest release version, uplink uses the highest point release of each major release starting from v0.15
# in stage 2: satellite core uses the latest release version and satellite api uses master. Storage nodes are split in half: half on the latest release version and half on master. Uplink uses all versions from stage 1 plus master
# in stage 2: satellite core uses the latest release version and satellite api uses main. Storage nodes are split in half: half on the latest release version and half on main. Uplink uses all versions from stage 1 plus main
git fetch --tags
major_release_tags=$(
git tag -l --sort -version:refname | # get the tag list
@ -38,9 +38,9 @@ current_release_version=$(echo $major_release_tags | xargs -n 1 | tail -1)
stage1_sat_version=$current_release_version
stage1_uplink_versions=$major_release_tags
stage1_storagenode_versions=$(populate_sno_versions $current_release_version 10)
stage2_sat_version="master"
stage2_uplink_versions=$major_release_tags\ "master"
stage2_storagenode_versions=$(populate_sno_versions $current_release_version 5)\ $(populate_sno_versions "master" 5)
stage2_sat_version="main"
stage2_uplink_versions=$major_release_tags\ "main"
stage2_storagenode_versions=$(populate_sno_versions $current_release_version 5)\ $(populate_sno_versions "main" 5)
echo "stage1_sat_version" $stage1_sat_version
echo "stage1_uplink_versions" $stage1_uplink_versions
@ -179,9 +179,9 @@ for version in ${unique_versions}; do
bin_dir=${dir}/bin
echo -e "\nAdding worktree for ${version} in ${dir}."
if [[ $version = "master" ]]
if [[ $version = "main" ]]
then
git worktree add -f "$dir" "origin/master"
git worktree add -f "$dir" "origin/main"
else
git worktree add -f "$dir" "${version}"
fi
@ -196,7 +196,7 @@ for version in ${unique_versions}; do
EOF
fi
if [[ $version = $current_release_version || $version = "master" ]]
if [[ $version = $current_release_version || $version = "main" ]]
then
echo "Installing storj-sim for ${version} in ${dir}."
@ -238,7 +238,7 @@ test_dir=$(version_dir "test_dir")
cp -r $(version_dir ${stage1_sat_version}) ${test_dir}
echo -e "\nSetting up stage 1 in ${test_dir}"
setup_stage "${test_dir}" "${stage1_sat_version}" "${stage1_storagenode_versions}"
update_access_script_path="$(version_dir "master")/scripts/update-access.go"
update_access_script_path="$(version_dir "main")/scripts/update-access.go"
# Uploading files to the network using the latest release version for each uplink version
for ul_version in ${stage1_uplink_versions}; do
@ -254,7 +254,7 @@ echo -e "\nSetting up stage 2 in ${test_dir}"
setup_stage "${test_dir}" "${stage2_sat_version}" "${stage2_storagenode_versions}"
echo -e "\nRunning stage 2."
# Downloading every file uploaded in stage 1 from the network using the latest commit from the master branch for each uplink version
# Downloading every file uploaded in stage 1 from the network using the latest commit from the main branch for each uplink version
for ul_version in ${stage2_uplink_versions}; do
echo "Stage 2 Uplink version: ${ul_version}"
src_ul_version_dir=$(version_dir ${ul_version})

View File

@ -98,6 +98,7 @@ func (s *Service) GetReputationStats(ctx context.Context, satelliteID storj.Node
SuspendedAt: resp.GetSuspended(),
OfflineSuspendedAt: resp.GetOfflineSuspended(),
OfflineUnderReviewAt: resp.GetOfflineUnderReview(),
AuditHistory: resp.GetAuditHistory(),
UpdatedAt: time.Now(),
JoinedAt: resp.JoinedAt,
}, nil
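
Since the reputation response now carries the full audit history, a caller holding the returned Stats can inspect the per-window counts directly. A hedged sketch — the helper below is hypothetical, not part of the service API; the field names match the pb.AuditHistory usage elsewhere in this diff, and the integer widths are assumed:

package main

import (
	"fmt"

	"storj.io/common/pb"
)

// summarizeAuditHistory walks the windows of a pb.AuditHistory and returns
// the overall online ratio. A hypothetical helper, for illustration only.
func summarizeAuditHistory(hist *pb.AuditHistory) float64 {
	if hist == nil || len(hist.Windows) == 0 {
		return 1.0 // no history recorded yet
	}
	var online, total int64
	for _, w := range hist.Windows {
		online += int64(w.OnlineCount)
		total += int64(w.TotalCount)
	}
	if total == 0 {
		return 1.0
	}
	return float64(online) / float64(total)
}

func main() {
	hist := &pb.AuditHistory{
		Score:   0.5,
		Windows: []*pb.AuditWindow{{OnlineCount: 5, TotalCount: 10}},
	}
	fmt.Println(summarizeAuditHistory(hist)) // 0.5
}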

View File

@ -266,19 +266,19 @@ func (endpoint *Endpoint) Upload(stream pb.DRPCPiecestore_UploadStream) (err err
mon.IntVal("upload_failure_size_bytes").Observe(uploadSize)
mon.IntVal("upload_failure_duration_ns").Observe(uploadDuration)
mon.FloatVal("upload_failure_rate_bytes_per_sec").Observe(uploadRate)
endpoint.log.Error("upload failed", zap.Stringer("Piece ID", limit.PieceId), zap.Stringer("Satellite ID", limit.SatelliteId), zap.Stringer("Action", limit.Action), zap.Error(err))
endpoint.log.Error("upload failed", zap.Stringer("Piece ID", limit.PieceId), zap.Stringer("Satellite ID", limit.SatelliteId), zap.Stringer("Action", limit.Action), zap.Error(err), zap.Int64("Size", uploadSize))
} else if errs2.IsCanceled(err) && !committed {
mon.Meter("upload_cancel_byte_meter").Mark64(uploadSize)
mon.IntVal("upload_cancel_size_bytes").Observe(uploadSize)
mon.IntVal("upload_cancel_duration_ns").Observe(uploadDuration)
mon.FloatVal("upload_cancel_rate_bytes_per_sec").Observe(uploadRate)
endpoint.log.Info("upload canceled", zap.Stringer("Piece ID", limit.PieceId), zap.Stringer("Satellite ID", limit.SatelliteId), zap.Stringer("Action", limit.Action))
endpoint.log.Info("upload canceled", zap.Stringer("Piece ID", limit.PieceId), zap.Stringer("Satellite ID", limit.SatelliteId), zap.Stringer("Action", limit.Action), zap.Int64("Size", uploadSize))
} else {
mon.Meter("upload_success_byte_meter").Mark64(uploadSize)
mon.IntVal("upload_success_size_bytes").Observe(uploadSize)
mon.IntVal("upload_success_duration_ns").Observe(uploadDuration)
mon.FloatVal("upload_success_rate_bytes_per_sec").Observe(uploadRate)
endpoint.log.Info("uploaded", zap.Stringer("Piece ID", limit.PieceId), zap.Stringer("Satellite ID", limit.SatelliteId), zap.Stringer("Action", limit.Action))
endpoint.log.Info("uploaded", zap.Stringer("Piece ID", limit.PieceId), zap.Stringer("Satellite ID", limit.SatelliteId), zap.Stringer("Action", limit.Action), zap.Int64("Size", uploadSize))
}
}()
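
The Size field threaded through each outcome above follows zap's typed-field pattern, so a log pipeline can filter and aggregate on byte counts instead of parsing message strings. A standalone sketch of the same pattern, with made-up values:

package main

import "go.uber.org/zap"

func main() {
	// Stand-in for the deferred logging above; the action and size are made up.
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	uploadSize := int64(65536)
	logger.Info("uploaded",
		zap.String("Action", "PUT"),
		zap.Int64("Size", uploadSize), // typed field, not string formatting
	)
}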

View File

@ -7,6 +7,7 @@ import (
"context"
"time"
"storj.io/common/pb"
"storj.io/common/storj"
)
@ -34,6 +35,7 @@ type Stats struct {
SuspendedAt *time.Time
OfflineSuspendedAt *time.Time
OfflineUnderReviewAt *time.Time
AuditHistory *pb.AuditHistory
UpdatedAt time.Time
JoinedAt time.Time

View File

@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/storagenode"
@ -67,6 +68,7 @@ func TestReputationDBGetInsert(t *testing.T) {
assert.True(t, res.OfflineSuspendedAt.Equal(*stats.OfflineSuspendedAt))
assert.True(t, res.OfflineUnderReviewAt.Equal(*stats.OfflineUnderReviewAt))
assert.Equal(t, res.OnlineScore, stats.OnlineScore)
assert.Nil(t, res.AuditHistory)
compareReputationMetric(t, &res.Uptime, &stats.Uptime)
compareReputationMetric(t, &res.Audit, &stats.Audit)
@ -133,6 +135,7 @@ func TestReputationDBGetAll(t *testing.T) {
assert.Equal(t, rep.OfflineSuspendedAt, stats[0].OfflineSuspendedAt)
assert.Equal(t, rep.OfflineUnderReviewAt, stats[0].OfflineUnderReviewAt)
assert.Equal(t, rep.OnlineScore, stats[0].OnlineScore)
assert.Nil(t, rep.AuditHistory)
compareReputationMetric(t, &rep.Uptime, &stats[0].Uptime)
compareReputationMetric(t, &rep.Audit, &stats[0].Audit)
@ -149,3 +152,44 @@ func compareReputationMetric(t *testing.T, a, b *reputation.Metric) {
assert.Equal(t, a.Beta, b.Beta)
assert.Equal(t, a.Score, b.Score)
}
func TestReputationDBGetInsertAuditHistory(t *testing.T) {
storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
timestamp := time.Now()
reputationDB := db.Reputation()
stats := reputation.Stats{
SatelliteID: testrand.NodeID(),
Uptime: reputation.Metric{},
Audit: reputation.Metric{},
AuditHistory: &pb.AuditHistory{
Score: 0.5,
Windows: []*pb.AuditWindow{
{
WindowStart: timestamp,
OnlineCount: 5,
TotalCount: 10,
},
},
},
}
t.Run("insert", func(t *testing.T) {
err := reputationDB.Store(ctx, stats)
assert.NoError(t, err)
})
t.Run("get", func(t *testing.T) {
res, err := reputationDB.Get(ctx, stats.SatelliteID)
assert.NoError(t, err)
assert.Equal(t, res.AuditHistory.Score, stats.AuditHistory.Score)
assert.Equal(t, len(res.AuditHistory.Windows), len(stats.AuditHistory.Windows))
resWindow := res.AuditHistory.Windows[0]
statsWindow := stats.AuditHistory.Windows[0]
assert.True(t, resWindow.WindowStart.Equal(statsWindow.WindowStart))
assert.Equal(t, resWindow.TotalCount, statsWindow.TotalCount)
assert.Equal(t, resWindow.OnlineCount, statsWindow.OnlineCount)
})
})
}

View File

@ -1830,6 +1830,14 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration {
);`,
},
},
{
DB: &db.reputationDB.DB,
Description: "Add audit_history field to reputation db",
Version: 47,
Action: migrate.SQL{
`ALTER TABLE reputation ADD COLUMN audit_history BLOB`,
},
},
},
}
}
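
Adding the column as a nullable BLOB keeps the migration backwards compatible: rows written before version 47 read back as NULL, which the Get code later in this diff leaves as a nil AuditHistory. A hedged way to verify the column after migrating — the database path is illustrative, and the check uses SQLite's standard table_info pragma:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Illustrative path; a real node keeps reputation.db in its storage directory.
	db, err := sql.Open("sqlite3", "reputation.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// PRAGMA table_info returns one row per column:
	// cid, name, type, notnull, dflt_value, pk.
	rows, err := db.Query("PRAGMA table_info(reputation)")
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			cid, notNull, pk int
			name, ctype      string
			dflt             sql.NullString
		)
		if err := rows.Scan(&cid, &name, &ctype, &notNull, &dflt, &pk); err != nil {
			panic(err)
		}
		if name == "audit_history" {
			fmt.Printf("found %s %s (nullable: %v)\n", name, ctype, notNull == 0)
		}
	}
}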

View File

@ -10,6 +10,7 @@ import (
"github.com/zeebo/errs"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/storagenode/reputation"
)
@ -45,13 +46,14 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err
audit_unknown_reputation_beta,
audit_unknown_reputation_score,
online_score,
audit_history,
disqualified_at,
suspended_at,
offline_suspended_at,
offline_under_review_at,
updated_at,
joined_at
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`
// ensure we insert utc
if stats.DisqualifiedAt != nil {
@ -71,6 +73,14 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err
stats.OfflineUnderReviewAt = &utc
}
var auditHistoryBytes []byte
if stats.AuditHistory != nil {
auditHistoryBytes, err = pb.Marshal(stats.AuditHistory)
if err != nil {
return ErrReputation.Wrap(err)
}
}
_, err = db.ExecContext(ctx, query,
stats.SatelliteID,
stats.Uptime.SuccessCount,
@ -87,6 +97,7 @@ func (db *reputationDB) Store(ctx context.Context, stats reputation.Stats) (err
stats.Audit.UnknownBeta,
stats.Audit.UnknownScore,
stats.OnlineScore,
auditHistoryBytes,
stats.DisqualifiedAt,
stats.SuspendedAt,
stats.OfflineSuspendedAt,
@ -121,6 +132,7 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r
audit_unknown_reputation_beta,
audit_unknown_reputation_score,
online_score,
audit_history,
disqualified_at,
suspended_at,
offline_suspended_at,
@ -131,6 +143,7 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r
satelliteID,
)
var auditHistoryBytes []byte
err = row.Scan(
&stats.Uptime.SuccessCount,
&stats.Uptime.TotalCount,
@ -146,6 +159,7 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r
&stats.Audit.UnknownBeta,
&stats.Audit.UnknownScore,
&stats.OnlineScore,
&auditHistoryBytes,
&stats.DisqualifiedAt,
&stats.SuspendedAt,
&stats.OfflineSuspendedAt,
@ -156,8 +170,16 @@ func (db *reputationDB) Get(ctx context.Context, satelliteID storj.NodeID) (_ *r
if errors.Is(err, sql.ErrNoRows) {
err = nil
return &stats, nil
}
if err != nil {
return &stats, ErrReputation.Wrap(err)
}
if auditHistoryBytes != nil {
stats.AuditHistory = &pb.AuditHistory{}
err = pb.Unmarshal(auditHistoryBytes, stats.AuditHistory)
}
return &stats, ErrReputation.Wrap(err)
}
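
Because the history is stored as an opaque protobuf blob, the round trip is simply marshal-on-store and unmarshal-on-get. A self-contained sketch using the same pb helpers the code above calls:

package main

import (
	"fmt"

	"storj.io/common/pb"
)

func main() {
	// Serialize the way Store does before writing the audit_history BLOB.
	hist := &pb.AuditHistory{Score: 0.5}
	blob, err := pb.Marshal(hist)
	if err != nil {
		panic(err)
	}

	// Deserialize the way Get does after scanning the BLOB back out.
	decoded := &pb.AuditHistory{}
	if err := pb.Unmarshal(blob, decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Score) // 0.5
}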

View File

@ -521,6 +521,11 @@ func Schema() map[string]*dbschema.Schema {
Name: "reputation",
PrimaryKey: []string{"satellite_id"},
Columns: []*dbschema.Column{
&dbschema.Column{
Name: "audit_history",
Type: "BLOB",
IsNullable: true,
},
&dbschema.Column{
Name: "audit_reputation_alpha",
Type: "REAL",

View File

@ -61,6 +61,7 @@ var States = MultiDBStates{
&v44,
&v45,
&v46,
&v47,
},
}

View File

@ -0,0 +1,56 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package testdata
import "storj.io/storj/storagenode/storagenodedb"
var v47 = MultiDBState{
Version: 47,
DBStates: DBStates{
storagenodedb.UsedSerialsDBName: v46.DBStates[storagenodedb.UsedSerialsDBName],
storagenodedb.StorageUsageDBName: v46.DBStates[storagenodedb.StorageUsageDBName],
storagenodedb.ReputationDBName: &DBState{
SQL: `
-- tables to store nodestats cache
CREATE TABLE reputation (
satellite_id BLOB NOT NULL,
uptime_success_count INTEGER NOT NULL,
uptime_total_count INTEGER NOT NULL,
uptime_reputation_alpha REAL NOT NULL,
uptime_reputation_beta REAL NOT NULL,
uptime_reputation_score REAL NOT NULL,
audit_success_count INTEGER NOT NULL,
audit_total_count INTEGER NOT NULL,
audit_reputation_alpha REAL NOT NULL,
audit_reputation_beta REAL NOT NULL,
audit_reputation_score REAL NOT NULL,
audit_unknown_reputation_alpha REAL NOT NULL,
audit_unknown_reputation_beta REAL NOT NULL,
audit_unknown_reputation_score REAL NOT NULL,
online_score REAL NOT NULL,
audit_history BLOB,
disqualified_at TIMESTAMP,
updated_at TIMESTAMP NOT NULL,
suspended_at TIMESTAMP,
offline_suspended_at TIMESTAMP,
offline_under_review_at TIMESTAMP,
joined_at TIMESTAMP NOT NULL,
PRIMARY KEY (satellite_id)
);
INSERT INTO reputation VALUES(X'0ed28abb2813e184a1e98b0f6605c4911ea468c7e8433eb583e0fca7ceac3000',1,1,1.0,1.0,1.0,1,1,1.0,1.0,1.0,1.0,1.0,1.0,1.0,NULL,'2019-07-19 20:00:00+00:00','2019-08-23 20:00:00+00:00',NULL,NULL,NULL,'1970-01-01 00:00:00+00:00');
`,
},
storagenodedb.PieceSpaceUsedDBName: v46.DBStates[storagenodedb.PieceSpaceUsedDBName],
storagenodedb.PieceInfoDBName: v46.DBStates[storagenodedb.PieceInfoDBName],
storagenodedb.PieceExpirationDBName: v46.DBStates[storagenodedb.PieceExpirationDBName],
storagenodedb.OrdersDBName: v46.DBStates[storagenodedb.OrdersDBName],
storagenodedb.BandwidthDBName: v46.DBStates[storagenodedb.BandwidthDBName],
storagenodedb.SatellitesDBName: v46.DBStates[storagenodedb.SatellitesDBName],
storagenodedb.DeprecatedInfoDBName: v46.DBStates[storagenodedb.DeprecatedInfoDBName],
storagenodedb.NotificationsDBName: v46.DBStates[storagenodedb.NotificationsDBName],
storagenodedb.HeldAmountDBName: v46.DBStates[storagenodedb.HeldAmountDBName],
storagenodedb.PricingDBName: v46.DBStates[storagenodedb.PricingDBName],
storagenodedb.SecretDBName: v46.DBStates[storagenodedb.SecretDBName],
},
}

View File

@ -1791,6 +1791,105 @@
"yorkie": "^2.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"fork-ts-checker-webpack-plugin-v5": {
"version": "npm:fork-ts-checker-webpack-plugin@5.2.1",
"resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-5.2.1.tgz",
"integrity": "sha512-SVi+ZAQOGbtAsUWrZvGzz38ga2YqjWvca1pXQFUArIVXqli0lLoDQ8uS0wg0kSpcwpZmaW5jVCZXQebkyUQSsw==",
"dev": true,
"optional": true,
"requires": {
"@babel/code-frame": "^7.8.3",
"@types/json-schema": "^7.0.5",
"chalk": "^4.1.0",
"cosmiconfig": "^6.0.0",
"deepmerge": "^4.2.2",
"fs-extra": "^9.0.0",
"memfs": "^3.1.2",
"minimatch": "^3.0.4",
"schema-utils": "2.7.0",
"semver": "^7.3.2",
"tapable": "^1.0.0"
},
"dependencies": {
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"schema-utils": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
"integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
"dev": true,
"optional": true,
"requires": {
"@types/json-schema": "^7.0.4",
"ajv": "^6.12.2",
"ajv-keywords": "^3.4.1"
}
},
"semver": {
"version": "7.3.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz",
"integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==",
"dev": true,
"optional": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"optional": true
},
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"optional": true,
"requires": {
"yallist": "^4.0.0"
}
},
"schema-utils": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
@ -1802,6 +1901,16 @@
"ajv-keywords": "^3.1.0"
}
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"optional": true,
"requires": {
"has-flag": "^4.0.0"
}
},
"tslint": {
"version": "5.20.1",
"resolved": "https://registry.npmjs.org/tslint/-/tslint-5.20.1.tgz",
@ -1853,6 +1962,13 @@
"watchpack": "^1.7.4",
"webpack-sources": "^1.4.1"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true,
"optional": true
}
}
},
@ -1932,6 +2048,16 @@
"integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
"dev": true
},
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"cacache": {
"version": "13.0.1",
"resolved": "https://registry.npmjs.org/cacache/-/cacache-13.0.1.tgz",
@ -1958,6 +2084,34 @@
"unique-filename": "^1.1.1"
}
},
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"find-up": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
@ -2095,6 +2249,32 @@
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
"dev": true
},
"vue-loader-v16": {
"version": "npm:vue-loader@16.1.2",
"resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-16.1.2.tgz",
"integrity": "sha512-8QTxh+Fd+HB6fiL52iEVLKqE9N1JSlMXLR92Ijm6g8PZrwIxckgpqjPDWRP5TWxdiPaHR+alUWsnu1ShQOwt+Q==",
"dev": true,
"optional": true,
"requires": {
"chalk": "^4.1.0",
"hash-sum": "^2.0.0",
"loader-utils": "^2.0.0"
},
"dependencies": {
"loader-utils": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz",
"integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==",
"dev": true,
"optional": true,
"requires": {
"big.js": "^5.2.2",
"emojis-list": "^3.0.0",
"json5": "^2.1.2"
}
}
}
},
"webpack": {
"version": "4.44.2",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-4.44.2.tgz",
@ -3273,6 +3453,16 @@
"integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==",
"dev": true
},
"bindings": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
"integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
"dev": true,
"optional": true,
"requires": {
"file-uri-to-path": "1.0.0"
}
},
"bluebird": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
@ -6036,6 +6226,13 @@
"schema-utils": "^2.5.0"
}
},
"file-uri-to-path": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
"integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
"dev": true,
"optional": true
},
"filesize": {
"version": "3.6.1",
"resolved": "https://registry.npmjs.org/filesize/-/filesize-3.6.1.tgz",
@ -6189,122 +6386,6 @@
"worker-rpc": "^0.1.0"
}
},
"fork-ts-checker-webpack-plugin-v5": {
"version": "npm:fork-ts-checker-webpack-plugin@5.2.1",
"resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-5.2.1.tgz",
"integrity": "sha512-SVi+ZAQOGbtAsUWrZvGzz38ga2YqjWvca1pXQFUArIVXqli0lLoDQ8uS0wg0kSpcwpZmaW5jVCZXQebkyUQSsw==",
"dev": true,
"optional": true,
"requires": {
"@babel/code-frame": "^7.8.3",
"@types/json-schema": "^7.0.5",
"chalk": "^4.1.0",
"cosmiconfig": "^6.0.0",
"deepmerge": "^4.2.2",
"fs-extra": "^9.0.0",
"memfs": "^3.1.2",
"minimatch": "^3.0.4",
"schema-utils": "2.7.0",
"semver": "^7.3.2",
"tapable": "^1.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"optional": true
},
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"optional": true,
"requires": {
"yallist": "^4.0.0"
}
},
"schema-utils": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
"integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
"dev": true,
"optional": true,
"requires": {
"@types/json-schema": "^7.0.4",
"ajv": "^6.12.2",
"ajv-keywords": "^3.4.1"
}
},
"semver": {
"version": "7.3.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz",
"integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==",
"dev": true,
"optional": true,
"requires": {
"lru-cache": "^6.0.0"
}
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"optional": true,
"requires": {
"has-flag": "^4.0.0"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true,
"optional": true
}
}
},
"form-data": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
@ -8515,6 +8596,13 @@
"thenify-all": "^1.0.0"
}
},
"nan": {
"version": "2.14.2",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz",
"integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==",
"dev": true,
"optional": true
},
"nanomatch": {
"version": "1.2.13",
"resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
@ -13028,87 +13116,6 @@
}
}
},
"vue-loader-v16": {
"version": "npm:vue-loader@16.1.2",
"resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-16.1.2.tgz",
"integrity": "sha512-8QTxh+Fd+HB6fiL52iEVLKqE9N1JSlMXLR92Ijm6g8PZrwIxckgpqjPDWRP5TWxdiPaHR+alUWsnu1ShQOwt+Q==",
"dev": true,
"optional": true,
"requires": {
"chalk": "^4.1.0",
"hash-sum": "^2.0.0",
"loader-utils": "^2.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"optional": true
},
"loader-utils": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz",
"integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==",
"dev": true,
"optional": true,
"requires": {
"big.js": "^5.2.2",
"emojis-list": "^3.0.0",
"json5": "^2.1.2"
}
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"optional": true,
"requires": {
"has-flag": "^4.0.0"
}
}
}
},
"vue-parser": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/vue-parser/-/vue-parser-1.1.6.tgz",
@ -13257,6 +13264,11 @@
"vue-parser": "^1.1.6"
}
},
"vuex": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/vuex/-/vuex-3.6.0.tgz",
"integrity": "sha512-W74OO2vCJPs9/YjNjW8lLbj+jzT24waTo2KShI8jLvJW8OaIkgb3wuAMA7D+ZiUxDOx3ubwSZTaJBip9G8a3aQ=="
},
"watchpack": {
"version": "1.7.5",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz",
@ -13335,7 +13347,11 @@
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz",
"integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==",
"dev": true,
"optional": true
"optional": true,
"requires": {
"bindings": "^1.5.0",
"nan": "^2.12.1"
}
},
"glob-parent": {
"version": "3.1.0",
@ -13830,7 +13846,11 @@
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz",
"integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==",
"dev": true,
"optional": true
"optional": true,
"requires": {
"bindings": "^1.5.0",
"nan": "^2.12.1"
}
},
"glob-parent": {
"version": "3.1.0",

View File

@ -12,7 +12,8 @@
"vue": "2.6.11",
"vue-class-component": "7.2.6",
"vue-property-decorator": "9.1.2",
"vue-router": "3.4.9"
"vue-router": "3.4.9",
"vuex": "3.6.0"
},
"devDependencies": {
"@babel/core": "7.12.10",

View File

@ -11,5 +11,6 @@
import { Component, Vue } from 'vue-property-decorator';
@Component
export default class App extends Vue {}
export default class App extends Vue {
}
</script>

View File

@ -24,7 +24,7 @@ export class Route {
public readonly meta?: Metadata;
/**
* default contructor.
* default constructor.
* @param path - route path.
* @param name - name of the route, needed for identifying the route by name.
* @param component - component mapped to route.
@ -46,7 +46,7 @@ export class Route {
export class Config {
public static Root: Route = new Route('/', 'Root', Dashboard, undefined, {requiresAuth: true});
public mode: RouterMode = 'history';
public static mode: RouterMode = 'history';
public static routes: Route[] = [
Config.Root,
];

View File

@ -0,0 +1,39 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
import Vue from 'vue';
import Vuex, { ModuleTree, Store, StoreOptions } from 'vuex';
import { NodesModule, NodesState } from '@/app/store/nodes';
Vue.use(Vuex); // TODO: move this to main.ts once all initialization happens there.
/**
* RootState is a representation of global state.
*/
export class RootState {
nodes: NodesState;
}
/**
* MultinodeStoreOptions contains all needed data for store creation.
*/
class MultinodeStoreOptions implements StoreOptions<RootState> {
public readonly strict: boolean;
public readonly state: RootState;
public readonly modules: ModuleTree<RootState>;
public constructor(nodes: NodesModule) {
this.strict = true;
this.state = {
nodes: nodes.state,
};
this.modules = {
nodes,
};
}
}
export const store: Store<RootState> = new Vuex.Store<RootState>(
new MultinodeStoreOptions(new NodesModule()),
);
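A minimal sketch of what strict mode gives here, assuming NodesModule registers itself under the nodes namespace (as shown in the next file); the calling context is hypothetical:

// Because MultinodeStoreOptions sets strict: true, state may only change
// inside mutation handlers; direct writes throw in development builds.
import { store } from '@/app/store';

store.commit('nodes/populate', []); // OK: goes through the registered mutation.
// store.state.nodes.nodes = [];    // would throw in strict mode outside a mutation handler.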

View File

@ -0,0 +1,56 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
import { ActionContext, ActionTree, GetterTree, Module, MutationTree } from 'vuex';
import { RootState } from '@/app/store/index';
import { Node } from '@/nodes';
/**
* NodesState is a representation of nodes module state.
*/
export class NodesState {
public nodes: Node[] = [];
}
/**
* NodesModule is a part of the global store that encapsulates all nodes-related logic.
*/
export class NodesModule implements Module<NodesState, RootState> {
public readonly namespaced: boolean;
public readonly state: NodesState;
public readonly getters?: GetterTree<NodesState, RootState>;
public readonly actions: ActionTree<NodesState, RootState>;
public readonly mutations: MutationTree<NodesState>;
public constructor() { // services, APIs, and third-party dependencies should be injected here.
this.namespaced = true;
this.state = new NodesState();
this.mutations = {
populate: this.populate,
};
this.actions = {
fetch: this.fetch,
};
}
/**
* populate mutation sets the module state with a new nodes array.
* @param state - state of the module.
* @param nodes - nodes to save in state; all of the user's nodes.
*/
public populate(state: NodesState, nodes: Node[]): void {
state.nodes = nodes;
}
/**
* fetch action loads all nodes.
* @param ctx - context of the Vuex action.
*/
public async fetch(ctx: ActionContext<NodesState, RootState>): Promise<void> {
await new Promise(() => null); // TODO: stub; call the nodes API here. Note this promise never resolves.
}
}
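A short usage sketch for this module, assuming the root store exported from '@/app/store' above; the fetched array is a placeholder for data a future MND API call would return:

import { store } from '@/app/store';
import { Node } from '@/nodes';

// The module sets namespaced = true, so its mutations and actions are
// addressed through the 'nodes/' prefix.
const fetched: Node[] = []; // placeholder: nodes returned by a future API client
store.commit('nodes/populate', fetched);

// fetch is still a stub that never resolves, so it is fired without awaiting.
store.dispatch('nodes/fetch');

// Module state hangs off the root state under the module key.
console.log(store.state.nodes.nodes);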

View File

@ -6,6 +6,7 @@ import Router from 'vue-router';
import App from '@/app/App.vue';
import { router } from '@/app/router';
import { store } from '@/app/store';
Vue.config.productionTip = false;
@ -13,6 +14,7 @@ Vue.use(Router);
const app = new Vue({
router,
store,
render: (h) => h(App),
});

View File

@ -0,0 +1,15 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
/**
* Node is a representation of a storage node that an SNO could add to the Multinode Dashboard.
*/
export class Node {
public id: string; // TODO: create ts analog of storj.NodeID;
/**
* apiSecret is a secret issued by the storage node; it will be the main auth mechanism in the MND <-> SNO API.
*/
public apiSecret: string; // TODO: change to Uint8Array[];
public publicAddress: string;
public name: string;
}
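A hypothetical construction of a Node value; every field below is a placeholder, not real node data:

import { Node } from '@/nodes';

const node = new Node();
node.id = '12jHxTS7txdGgzeaXCSMR6yomWnD7nJcpz8QB7GrqxTAgftE5Y2'; // placeholder ID string
node.apiSecret = 'secret-issued-by-the-storage-node'; // placeholder; becomes Uint8Array per the TODO
node.publicAddress = 'my-node.example.com:28967';
node.name = 'my first node';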

View File

@ -5,7 +5,7 @@ const path = require('path');
const StyleLintPlugin = require('stylelint-webpack-plugin');
module.exports = {
publicPath: "/static/dist",
// publicPath: "/static/dist",
productionSourceMap: false,
parallel: true,
configureWebpack: {

View File

@ -4,6 +4,7 @@
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, viewport-fit=cover">
<meta name="satellite-name" content="{{ .SatelliteName }}">
<meta name="satellite-nodeurl" content="{{ .SatelliteNodeURL }}">
<meta name="segment-io" content="{{ .SegmentIOPublicKey }}">
<meta name="stripe-public-key" content="{{ .StripePublicKey }}">
<meta name="verification-page-url" content="{{ .VerificationPageURL }}">

View File

@ -2150,6 +2150,115 @@
"yorkie": "^2.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"fork-ts-checker-webpack-plugin-v5": {
"version": "npm:fork-ts-checker-webpack-plugin@5.2.1",
"resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-5.2.1.tgz",
"integrity": "sha512-SVi+ZAQOGbtAsUWrZvGzz38ga2YqjWvca1pXQFUArIVXqli0lLoDQ8uS0wg0kSpcwpZmaW5jVCZXQebkyUQSsw==",
"dev": true,
"optional": true,
"requires": {
"@babel/code-frame": "^7.8.3",
"@types/json-schema": "^7.0.5",
"chalk": "^4.1.0",
"cosmiconfig": "^6.0.0",
"deepmerge": "^4.2.2",
"fs-extra": "^9.0.0",
"memfs": "^3.1.2",
"minimatch": "^3.0.4",
"schema-utils": "2.7.0",
"semver": "^7.3.2",
"tapable": "^1.0.0"
},
"dependencies": {
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"semver": {
"version": "7.3.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz",
"integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==",
"dev": true,
"optional": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"optional": true
},
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"optional": true,
"requires": {
"yallist": "^4.0.0"
}
},
"schema-utils": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
"integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
"dev": true,
"optional": true,
"requires": {
"@types/json-schema": "^7.0.4",
"ajv": "^6.12.2",
"ajv-keywords": "^3.4.1"
}
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"optional": true,
"requires": {
"has-flag": "^4.0.0"
}
},
"tslint": {
"version": "5.20.1",
"resolved": "https://registry.npmjs.org/tslint/-/tslint-5.20.1.tgz",
@ -2170,6 +2279,13 @@
"tslib": "^1.8.0",
"tsutils": "^2.29.0"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true,
"optional": true
}
}
},
@ -2349,6 +2465,17 @@
"unique-filename": "^1.1.1"
}
},
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"cliui": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz",
@ -2444,6 +2571,18 @@
"graceful-fs": "^4.1.6"
}
},
"loader-utils": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz",
"integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==",
"dev": true,
"optional": true,
"requires": {
"big.js": "^5.2.2",
"emojis-list": "^3.0.0",
"json5": "^2.1.2"
}
},
"locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
@ -2551,6 +2690,18 @@
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
"dev": true
},
"vue-loader-v16": {
"version": "npm:vue-loader@16.1.2",
"resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-16.1.2.tgz",
"integrity": "sha512-8QTxh+Fd+HB6fiL52iEVLKqE9N1JSlMXLR92Ijm6g8PZrwIxckgpqjPDWRP5TWxdiPaHR+alUWsnu1ShQOwt+Q==",
"dev": true,
"optional": true,
"requires": {
"chalk": "^4.1.0",
"hash-sum": "^2.0.0",
"loader-utils": "^2.0.0"
}
},
"wrap-ansi": {
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
@ -7399,102 +7550,6 @@
"worker-rpc": "^0.1.0"
}
},
"fork-ts-checker-webpack-plugin-v5": {
"version": "npm:fork-ts-checker-webpack-plugin@5.2.1",
"resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-5.2.1.tgz",
"integrity": "sha512-SVi+ZAQOGbtAsUWrZvGzz38ga2YqjWvca1pXQFUArIVXqli0lLoDQ8uS0wg0kSpcwpZmaW5jVCZXQebkyUQSsw==",
"dev": true,
"optional": true,
"requires": {
"@babel/code-frame": "^7.8.3",
"@types/json-schema": "^7.0.5",
"chalk": "^4.1.0",
"cosmiconfig": "^6.0.0",
"deepmerge": "^4.2.2",
"fs-extra": "^9.0.0",
"memfs": "^3.1.2",
"minimatch": "^3.0.4",
"schema-utils": "2.7.0",
"semver": "^7.3.2",
"tapable": "^1.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"optional": true
},
"schema-utils": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
"integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
"dev": true,
"optional": true,
"requires": {
"@types/json-schema": "^7.0.4",
"ajv": "^6.12.2",
"ajv-keywords": "^3.4.1"
}
},
"semver": {
"version": "7.3.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz",
"integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==",
"dev": true,
"optional": true
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"optional": true,
"requires": {
"has-flag": "^4.0.0"
}
}
}
},
"form-data": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
@ -16717,87 +16772,6 @@
}
}
},
"vue-loader-v16": {
"version": "npm:vue-loader@16.0.0-rc.1",
"resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-16.0.0-rc.1.tgz",
"integrity": "sha512-yR+BS90EOXTNieasf8ce9J3TFCpm2DGqoqdbtiwQ33hon3FyIznLX7sKavAq1VmfBnOeV6It0Htg4aniv8ph1g==",
"dev": true,
"optional": true,
"requires": {
"chalk": "^4.1.0",
"hash-sum": "^2.0.0",
"loader-utils": "^2.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"optional": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
"integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"optional": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"optional": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"optional": true
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"optional": true
},
"loader-utils": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz",
"integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==",
"dev": true,
"optional": true,
"requires": {
"big.js": "^5.2.2",
"emojis-list": "^3.0.0",
"json5": "^2.1.2"
}
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"optional": true,
"requires": {
"has-flag": "^4.0.0"
}
}
}
},
"vue-parser": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/vue-parser/-/vue-parser-1.1.6.tgz",

View File

@ -160,7 +160,7 @@ export default class AccessGrants extends Vue {
* Resets pagination to default state.
*/
public resetPagination(): void {
if (this.totalPageCount > 0) {
if (this.totalPageCount > 1) {
this.$refs.pagination.resetPageIndex();
}
}

View File

@ -6,7 +6,7 @@
<div class="confirm-delete__container">
<h1 class="confirm-delete__container__title">Are you sure?</h1>
<p class="confirm-delete__container__info">
When an access grant is removed, users using it will no longer have access to the buckets or dat.
When an access grant is removed, users using it will no longer have access to the buckets or data.
</p>
<p class="confirm-delete__container__list-label">
The following access grant(s) will be removed from this project:
@ -32,12 +32,14 @@
height="44px"
:on-press="onCancelClick"
is-white="true"
:is-disabled="isLoading"
/>
<VButton
label="Remove"
width="50%"
height="44px"
:on-press="onDeleteClick"
:is-disabled="isLoading"
/>
</div>
<div class="confirm-delete__container__close-cross-container" @click="onCancelClick">
@ -65,11 +67,16 @@ import { AccessGrant } from '@/types/accessGrants';
})
export default class ConfirmDeletePopup extends Vue {
private FIRST_PAGE: number = 1;
private isLoading: boolean = false;
/**
* Deletes selected access grants, fetches updated list and closes popup.
*/
public async onDeleteClick(): Promise<void> {
if (this.isLoading) return;
this.isLoading = true;
try {
await this.$store.dispatch(ACCESS_GRANTS_ACTIONS.DELETE);
await this.$notify.success(`Access Grant deleted successfully`);
@ -79,11 +86,13 @@ export default class ConfirmDeletePopup extends Vue {
try {
await this.$store.dispatch(ACCESS_GRANTS_ACTIONS.FETCH, this.FIRST_PAGE);
await this.$store.dispatch(ACCESS_GRANTS_ACTIONS.CLEAR_SELECTION);
} catch (error) {
await this.$notify.error(`Unable to fetch Access Grants. ${error.message}`);
}
this.$emit('reset-pagination');
this.isLoading = false;
this.onCancelClick();
}

View File

@ -106,7 +106,6 @@ export default class ProgressBar extends Vue {
.checked {
font-family: 'font_bold', sans-serif;
font-weight: bold;
color: #000;
}

View File

@ -194,14 +194,14 @@ export default class CreatePassphraseStep extends Vue {
this.isLoading = true;
const satelliteName = MetaUtils.getMetaContent('satellite-name');
const satelliteNodeURL = MetaUtils.getMetaContent('satellite-nodeurl');
this.worker.postMessage({
'type': 'GenerateAccess',
'apiKey': this.restrictedKey,
'passphrase': this.passphrase,
'projectID': this.$store.getters.selectedProject.id,
'satelliteName': satelliteName,
'satelliteNodeURL': satelliteNodeURL,
});
// Give time for web worker to return value.
@ -214,6 +214,7 @@ export default class CreatePassphraseStep extends Vue {
params: {
access: this.access,
key: this.key,
restrictedKey: this.restrictedKey,
},
});
@ -225,6 +226,7 @@ export default class CreatePassphraseStep extends Vue {
params: {
access: this.access,
key: this.key,
restrictedKey: this.restrictedKey,
},
});
}, 1000);

View File

@ -93,14 +93,14 @@ export default class EnterPassphraseStep extends Vue {
this.isLoading = true;
const satelliteName = MetaUtils.getMetaContent('satellite-name');
const satelliteNodeURL = MetaUtils.getMetaContent('satellite-nodeurl');
this.worker.postMessage({
'type': 'GenerateAccess',
'apiKey': this.restrictedKey,
'passphrase': this.passphrase,
'projectID': this.$store.getters.selectedProject.id,
'satelliteName': satelliteName,
'satelliteNodeURL': satelliteNodeURL,
});
// Give time for web worker to return value.
@ -112,6 +112,7 @@ export default class EnterPassphraseStep extends Vue {
params: {
access: this.access,
key: this.key,
restrictedKey: this.restrictedKey,
},
});
}, 1000);

View File

@ -124,6 +124,7 @@ import { MetaUtils } from '@/utils/meta';
})
export default class ResultStep extends Vue {
private key: string = '';
private restrictedKey: string = '';
public access: string = '';
public isGatewayDropdownVisible: boolean = false;
@ -136,7 +137,7 @@ export default class ResultStep extends Vue {
* Sets local access from props value.
*/
public mounted(): void {
if (!this.$route.params.access && !this.$route.params.key) {
if (!this.$route.params.access && !this.$route.params.key && !this.$route.params.restrictedKey) {
this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
return;
@ -144,6 +145,7 @@ export default class ResultStep extends Vue {
this.access = this.$route.params.access;
this.key = this.$route.params.key;
this.restrictedKey = this.$route.params.restrictedKey;
const requestURL = MetaUtils.getMetaContent('gateway-credentials-request-url');
if (requestURL) this.isGatewayDropdownVisible = true;
@ -202,6 +204,7 @@ export default class ResultStep extends Vue {
name: RouteConfig.OnboardingTour.with(RouteConfig.AccessGrant.with(RouteConfig.AccessGrantPassphrase)).name,
params: {
key: this.key,
restrictedKey: this.restrictedKey,
},
});
@ -213,6 +216,7 @@ export default class ResultStep extends Vue {
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.EnterPassphraseStep)).name,
params: {
key: this.key,
restrictedKey: this.restrictedKey,
},
});
@ -223,6 +227,7 @@ export default class ResultStep extends Vue {
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.CreatePassphraseStep)).name,
params: {
key: this.key,
restrictedKey: this.restrictedKey,
},
});
}

View File

@ -119,6 +119,7 @@ export default class VButton extends Vue {
line-height: 23px;
color: #fff;
margin: 0;
white-space: nowrap;
}
&:hover {

View File

@ -13,7 +13,7 @@
>
<div class="navigation-area__item-container__link">
<component :is="navItem.icon"></component>
<h1 class="navigation-area__item-container__link__title">{{navItem.name}}</h1>
<p class="navigation-area__item-container__link__title">{{navItem.name}}</p>
</div>
</router-link>
</div>
@ -103,10 +103,12 @@ export default class NavigationArea extends Vue {
align-items: center;
&__title {
font-family: 'font_medium', sans-serif;
font-size: 16px;
line-height: 23px;
color: #1b2533;
margin: 0 0 0 15px;
white-space: nowrap;
}
}

View File

@ -193,10 +193,8 @@ export function makeAccessGrantsModule(api: AccessGrantsApi): StoreModule<Access
return accessGrant;
},
deleteAccessGrants: async function({state, commit}: any): Promise<void> {
deleteAccessGrants: async function({state}: any): Promise<void> {
await api.delete(state.selectedAccessGrantsIds);
commit(CLEAR_SELECTION);
},
getGatewayCredentials: async function({state, commit}: any, accessGrant: string): Promise<void> {
const credentials: GatewayCredentials = await api.getGatewayCredentials(accessGrant);

View File

@ -29,8 +29,7 @@ self.onmessage = function (event) {
apiKey = data.apiKey;
const passphrase = data.passphrase;
const projectID = data.projectID;
const satelliteName = data.satelliteName;
const nodeURL = getSatelliteNodeURL(satelliteName);
const nodeURL = data.satelliteNodeURL;
result = self.generateAccessGrant(nodeURL, apiKey, passphrase, projectID);
@ -63,16 +62,3 @@ self.onmessage = function (event) {
self.postMessage(new Error('provided message event type is not supported'));
}
};
function getSatelliteNodeURL(satellite) {
switch (satellite) {
case 'Asia-East-1':
return '121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777';
case 'Europe-West-1':
return '12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777';
case 'US-Central-1':
return '12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777';
default:
return '12jHxTS7txdGgzeaXCSMR6yomWnD7nJcpz8QB7GrqxTAgftE5Y2@127.0.0.1:10000';
}
}

View File

@ -8,19 +8,19 @@ exports[`NavigationArea snapshot not changed with project 1`] = `
<router-link-stub to="/project-dashboard" tag="a" ariacurrentvalue="page" event="click" aria-label="Dashboard" class="navigation-area__item-container">
<div class="navigation-area__item-container__link">
<anonymous-stub></anonymous-stub>
<h1 class="navigation-area__item-container__link__title">Dashboard</h1>
<p class="navigation-area__item-container__link__title">Dashboard</p>
</div>
</router-link-stub>
<router-link-stub to="/access-grants" tag="a" ariacurrentvalue="page" event="click" aria-label="Access Grants" class="navigation-area__item-container">
<div class="navigation-area__item-container__link">
<anonymous-stub></anonymous-stub>
<h1 class="navigation-area__item-container__link__title">Access Grants</h1>
<p class="navigation-area__item-container__link__title">Access Grants</p>
</div>
</router-link-stub>
<router-link-stub to="/project-members" tag="a" ariacurrentvalue="page" event="click" aria-label="Users" class="navigation-area__item-container">
<div class="navigation-area__item-container__link">
<anonymous-stub></anonymous-stub>
<h1 class="navigation-area__item-container__link__title">Users</h1>
<p class="navigation-area__item-container__link__title">Users</p>
</div>
</router-link-stub>
</div>

View File

@ -72,7 +72,7 @@ export default class HeldProgress extends Vue {
),
new HeldStep(
'50%',
`Month ${this.MONTHS_BREAKPOINTS[1] + 1}-${this.MONTHS_BREAKPOINTS[4]}`,
`Month ${this.MONTHS_BREAKPOINTS[1] + 1}-${this.MONTHS_BREAKPOINTS[2]}`,
this.monthsOnNetwork > this.MONTHS_BREAKPOINTS[1] && this.monthsOnNetwork <= this.MONTHS_BREAKPOINTS[2],
this.monthsOnNetwork < this.MONTHS_BREAKPOINTS[1] + 1,
),