Merge 'master' branch

Change-Id: Ib73af0ff3ce0e9a1547b0b9fc55bf88704f6f394
Michal Niewrzal 2020-12-18 09:13:03 +01:00
commit 2111740236
52 changed files with 15972 additions and 456 deletions

View File

@@ -226,7 +226,7 @@ binary:
scripts/release.sh build $(EXTRA_ARGS) -o release/${TAG}/$(COMPONENT)_${GOOS}_${GOARCH}${FILEEXT} \
storj.io/storj/cmd/${COMPONENT}
if [ "${COMPONENT}" = "satellite" ] && [ "${GOARCH}" = "amd64" ]; \
if [ "${COMPONENT}" = "satellite" ] && [ "${GOOS}" = "linux" ] && [ "${GOARCH}" = "amd64" ]; \
then \
echo "Building wasm code"; \
$(MAKE) satellite-wasm; \

View File

@@ -28,6 +28,8 @@ COPY --from=ui /app/marketing /app/marketing
COPY --from=ca-cert /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY release/${TAG}/wasm/access.wasm /app/static/wasm/
COPY release/${TAG}/wasm/wasm_exec.js /app/static/wasm/
COPY release/${TAG}/wasm/access.wasm.br /app/static/wasm/
COPY release/${TAG}/wasm/wasm_exec.js.br /app/static/wasm/
COPY release/${TAG}/satellite_linux_${GOARCH:-amd64} /app/satellite
COPY release/${TAG}/inspector_linux_${GOARCH:-amd64} /app/inspector
COPY cmd/satellite/entrypoint /entrypoint

View File

@@ -43,7 +43,7 @@ const (
)
var (
defaultAccess = "13GKzTN8PoLvMFuN9JDZxMhyKGACmdoZuYXYSRNZQqiDDwv2Jm1FjVuZRHvNZ4Eh1Ganzi4cNV5N3fNb17ycSYqJQAdAPSSyXM1KqSbDFqYTbZAN2LTgxKJVkrnKGCGd2a93sM9eKyhfoXrukPhYjfk2dUpRzsCPsAVFVT4Cm2v7RpjEiwN1L42z"
defaultAccess = "12edqtGZnqQo6QHwTB92EDqg9B1WrWn34r7ALu94wkqXL4eXjBNnVr6F5W7GhJjVqJCqxpFERmDR1dhZWyMt3Qq5zwrE9yygXeT6kBoS9AfiPuwB6kNjjxepg5UtPPtp4VLp9mP5eeyobKQRD5TsEsxTGhxamsrHvGGBPrZi8DeLtNYFMRTV6RyJVxpYX6MrPCw9HVoDQbFs7VcPeeRxRMQttSXL3y33BJhkqJ6ByFviEquaX5R2wjQT2Kx"
)
const (

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package controllers
import (
"encoding/json"
"net/http"
"github.com/spacemonkeygo/monkit/v3"
"go.uber.org/zap"
)
var (
mon = monkit.Package()
)
// NotFound handles the API response for routes that are not found.
type NotFound struct {
log *zap.Logger
}
// NewNotFound creates a new instance of the NotFound handler.
func NewNotFound(log *zap.Logger) http.Handler {
return &NotFound{
log: log,
}
}
// ServeHTTP serves a 404 response with a JSON error when the resource is not found.
func (handler *NotFound) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// the Content-Type header must be set before WriteHeader, otherwise it is ignored.
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusNotFound)
var response struct {
Error string `json:"error"`
}
response.Error = "resource not found"
err := json.NewEncoder(w).Encode(response)
if err != nil {
handler.log.Error("failed to write json error response", zap.Error(err))
}
}
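For context, a handler like this can be exercised directly with net/http/httptest. A minimal test sketch, not part of this commit (the import path is taken from the router change below, and the expected body assumes the trailing newline added by json.Encoder):

package controllers_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"go.uber.org/zap"

	"storj.io/storj/multinode/console/controllers"
)

// TestNotFoundServesJSON is a hypothetical check that the handler above
// returns a 404 status and the expected JSON body.
func TestNotFoundServesJSON(t *testing.T) {
	handler := controllers.NewNotFound(zap.NewNop())

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/api/v0/does-not-exist", nil)
	handler.ServeHTTP(rec, req)

	if rec.Code != http.StatusNotFound {
		t.Fatalf("expected %d, got %d", http.StatusNotFound, rec.Code)
	}
	if got := rec.Body.String(); got != "{\"error\":\"resource not found\"}\n" {
		t.Fatalf("unexpected body: %q", got)
	}
}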

View File

@@ -34,52 +34,139 @@ func NewNodes(log *zap.Logger, service *nodes.Service) *Nodes {
}
}
// AddNodeRequest holds all data needed to add node.
type AddNodeRequest struct {
ID string `json:"id"`
APISecret string `json:"apiSecret"`
PublicAddress string `json:"publicAddress"`
}
// Add handles node addition.
func (controller *Nodes) Add(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
w.Header().Add("Content-Type", "application/json")
var request AddNodeRequest
if err = json.NewDecoder(r.Body).Decode(&request); err != nil {
var payload struct {
ID string `json:"id"`
APISecret string `json:"apiSecret"`
PublicAddress string `json:"publicAddress"`
}
if err = json.NewDecoder(r.Body).Decode(&payload); err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
id, err := storj.NodeIDFromString(request.ID)
id, err := storj.NodeIDFromString(payload.ID)
if err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
apiSecret, err := nodes.APISecretFromBase64(request.APISecret)
apiSecret, err := nodes.APISecretFromBase64(payload.APISecret)
if err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
if err = controller.service.Add(ctx, id, apiSecret, request.PublicAddress); err != nil {
// TODO: add more error checks in future, like bad request if address is invalid or unauthorized if secret invalid.
if err = controller.service.Add(ctx, id, apiSecret, payload.PublicAddress); err != nil {
// TODO: add more error checks in the future, like bad request if the address is invalid or unauthorized if the secret is invalid.
controller.log.Error("add node internal error", zap.Error(err))
controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err))
return
}
}
// UpdateName is an endpoint to update node name.
func (controller *Nodes) UpdateName(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
w.Header().Add("Content-Type", "application/json")
segmentParams := mux.Vars(r)
idString, ok := segmentParams["id"]
if !ok {
controller.serveError(w, http.StatusBadRequest, ErrNodes.New("id segment parameter is missing"))
return
}
id, err := storj.NodeIDFromString(idString)
if err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
var payload struct {
Name string `json:"name"`
}
if err = json.NewDecoder(r.Body).Decode(&payload); err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
err = controller.service.UpdateName(ctx, id, payload.Name)
if err != nil {
// TODO: add more error checks in future, like not found if node is missing.
controller.log.Error("update node name internal error", zap.Error(err))
controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err))
return
}
}
// Get handles retrieving node by id.
func (controller *Nodes) Get(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
w.Header().Add("Content-Type", "application/json")
vars := mux.Vars(r)
nodeID, err := storj.NodeIDFromString(vars["id"])
if err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
node, err := controller.service.Get(ctx, nodeID)
if err != nil {
controller.log.Error("get node not found error", zap.Error(err))
controller.serveError(w, http.StatusNotFound, ErrNodes.Wrap(err))
return
}
if err = json.NewEncoder(w).Encode(node); err != nil {
controller.log.Error("failed to write json response", zap.Error(err))
return
}
}
// List handles retrieving list of nodes.
func (controller *Nodes) List(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
w.Header().Add("Content-Type", "application/json")
nodes, err := controller.service.List(ctx)
if err != nil {
controller.log.Error("list nodes internal error", zap.Error(err))
controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err))
return
}
if err = json.NewEncoder(w).Encode(nodes); err != nil {
controller.log.Error("failed to write json response", zap.Error(err))
return
}
}
// Delete handles node removal.
func (controller *Nodes) Delete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
w.Header().Add("Content-Type", "application/json")
@@ -99,60 +186,17 @@ func (controller *Nodes) Delete(w http.ResponseWriter, r *http.Request) {
}
if err = controller.service.Remove(ctx, id); err != nil {
// TODO: add more error checks in future, like not found if node is missing or unauthorized if secret invalid.
// TODO: add more error checks in future, like not found if node is missing.
controller.log.Error("delete node internal error", zap.Error(err))
controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err))
return
}
}
// UpdateNodeNameRequest holds all data needed to add node.
type UpdateNodeNameRequest struct {
Name string `json:"name"`
}
// UpdateName is an endpoint to update node name.
func (controller *Nodes) UpdateName(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
w.Header().Add("Content-Type", "application/json")
segmentParams := mux.Vars(r)
idString, ok := segmentParams["id"]
if !ok {
controller.serveError(w, http.StatusBadRequest, ErrNodes.New("id segment parameter is missing"))
return
}
id, err := storj.NodeIDFromString(idString)
if err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
var request UpdateNodeNameRequest
if err = json.NewDecoder(r.Body).Decode(&request); err != nil {
controller.serveError(w, http.StatusBadRequest, ErrNodes.Wrap(err))
return
}
err = controller.service.Update(ctx, id, request.Name)
if err != nil {
// TODO: add more error checks in future, like not found if node is missing or unauthorized if secret invalid.
controller.serveError(w, http.StatusInternalServerError, ErrNodes.Wrap(err))
return
}
}
// serveError is used to log error, set http statuses and send error with json.
// serveError sets the http status and sends a json error.
func (controller *Nodes) serveError(w http.ResponseWriter, status int, err error) {
w.WriteHeader(status)
controller.log.Error("", zap.Error(err))
var response struct {
Error string `json:"error"`
}
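For reference, a request against the Add endpoint above could look like the sketch below; the base URL is an assumption and the field values are placeholders, not working credentials:

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical call to POST /api/v0/nodes; host and port are assumptions.
	body := strings.NewReader(`{"id": "<node id>", "apiSecret": "<base64 secret>", "publicAddress": "127.0.0.1:7777"}`)

	resp, err := http.Post("http://localhost:15001/api/v0/nodes", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()

	fmt.Println("status:", resp.Status)
}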

View File

@@ -1,12 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package controllers
import (
"github.com/spacemonkeygo/monkit/v3"
)
var (
mon = monkit.Package()
)

View File

@@ -13,7 +13,7 @@ import (
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"storj.io/storj/multinode/console/server/controllers"
"storj.io/storj/multinode/console/controllers"
"storj.io/storj/multinode/nodes"
)
@@ -51,15 +51,16 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne
}
router := mux.NewRouter()
router.StrictSlash(true)
apiRouter := router.PathPrefix("/api/v0").Subrouter()
apiRouter.NotFoundHandler = controllers.NewNotFound(server.log)
nodesController := controllers.NewNodes(server.log, server.nodes)
nodesRouter := apiRouter.PathPrefix("/nodes").Subrouter()
nodesRouter.HandleFunc("", nodesController.Add).Methods(http.MethodPost)
nodesRouter.HandleFunc("/{id}", nodesController.Delete).Methods(http.MethodDelete)
nodesRouter.HandleFunc("", nodesController.List).Methods(http.MethodGet)
nodesRouter.HandleFunc("/{id}", nodesController.Get).Methods(http.MethodGet)
nodesRouter.HandleFunc("/{id}", nodesController.UpdateName).Methods(http.MethodPatch)
nodesRouter.HandleFunc("/{id}", nodesController.Delete).Methods(http.MethodDelete)
server.http = http.Server{
Handler: router,

View File

@@ -29,8 +29,8 @@ type nodesdb struct {
methods dbx.Methods
}
// GetAll returns all connected nodes.
func (n *nodesdb) GetAll(ctx context.Context) (allNodes []nodes.Node, err error) {
// List returns all connected nodes.
func (n *nodesdb) List(ctx context.Context) (allNodes []nodes.Node, err error) {
defer mon.Task()(&ctx)(&err)
dbxNodes, err := n.methods.All_Node(ctx)
@@ -53,8 +53,8 @@ func (n *nodesdb) GetAll(ctx context.Context) (allNodes []nodes.Node, err error)
return allNodes, ErrNodesDB.Wrap(err)
}
// GetByID return node from NodesDB by its id.
func (n *nodesdb) GetByID(ctx context.Context, id storj.NodeID) (_ nodes.Node, err error) {
// Get returns a node from NodesDB by its id.
func (n *nodesdb) Get(ctx context.Context, id storj.NodeID) (_ nodes.Node, err error) {
defer mon.Task()(&ctx)(&err)
dbxNode, err := n.methods.Get_Node_By_Id(ctx, dbx.Node_Id(id.Bytes()))

View File

@@ -18,10 +18,10 @@ import (
//
// architecture: Database
type DB interface {
// GetByID return node from NodesDB by its id.
GetByID(ctx context.Context, id storj.NodeID) (Node, error)
// GetAll returns all connected nodes.
GetAll(ctx context.Context) ([]Node, error)
// Get returns a node from NodesDB by its id.
Get(ctx context.Context, id storj.NodeID) (Node, error)
// List returns all connected nodes.
List(ctx context.Context) ([]Node, error)
// Add creates a new node in NodesDB.
Add(ctx context.Context, id storj.NodeID, apiSecret []byte, publicAddress string) error
// Remove removes a node from NodesDB.

View File

@@ -26,13 +26,13 @@ func TestNodesDB(t *testing.T) {
err := nodesRepository.Add(ctx, nodeID, apiSecret, publicAddress)
assert.NoError(t, err)
node, err := nodesRepository.GetByID(ctx, nodeID)
node, err := nodesRepository.Get(ctx, nodeID)
assert.NoError(t, err)
assert.Equal(t, node.ID.Bytes(), nodeID.Bytes())
assert.Equal(t, node.APISecret, apiSecret)
assert.Equal(t, node.PublicAddress, publicAddress)
allNodes, err := nodesRepository.GetAll(ctx)
allNodes, err := nodesRepository.List(ctx)
assert.NoError(t, err)
assert.Equal(t, len(allNodes), 1)
assert.Equal(t, node.ID.Bytes(), allNodes[0].ID.Bytes())
@@ -43,18 +43,18 @@ func TestNodesDB(t *testing.T) {
err = nodesRepository.UpdateName(ctx, nodeID, newName)
assert.NoError(t, err)
node, err = nodesRepository.GetByID(ctx, nodeID)
node, err = nodesRepository.Get(ctx, nodeID)
assert.NoError(t, err)
assert.Equal(t, node.Name, newName)
err = nodesRepository.Remove(ctx, nodeID)
assert.NoError(t, err)
_, err = nodesRepository.GetAll(ctx)
_, err = nodesRepository.List(ctx)
assert.Error(t, err)
assert.True(t, nodes.ErrNoNode.Has(err))
node, err = nodesRepository.GetByID(ctx, nodeID)
node, err = nodesRepository.Get(ctx, nodeID)
assert.Error(t, err)
assert.True(t, nodes.ErrNoNode.Has(err))
})

View File

@@ -42,14 +42,39 @@ func (service *Service) Add(ctx context.Context, id storj.NodeID, apiSecret []by
return Error.Wrap(service.nodes.Add(ctx, id, apiSecret, publicAddress))
}
// UpdateName will update the name of the specified node.
func (service *Service) UpdateName(ctx context.Context, id storj.NodeID, name string) (err error) {
defer mon.Task()(&ctx)(&err)
return Error.Wrap(service.nodes.UpdateName(ctx, id, name))
}
// Get retrieves node by id.
func (service *Service) Get(ctx context.Context, id storj.NodeID) (_ Node, err error) {
defer mon.Task()(&ctx)(&err)
node, err := service.nodes.Get(ctx, id)
if err != nil {
return Node{}, Error.Wrap(err)
}
return node, nil
}
// List retrieves list of all added nodes.
func (service *Service) List(ctx context.Context) (_ []Node, err error) {
defer mon.Task()(&ctx)(&err)
nodes, err := service.nodes.List(ctx)
if err != nil {
return nil, Error.Wrap(err)
}
return nodes, nil
}
// Remove removes node from the system.
func (service *Service) Remove(ctx context.Context, id storj.NodeID) (err error) {
defer mon.Task()(&ctx)(&err)
return Error.Wrap(service.nodes.Remove(ctx, id))
}
// Update will update name of the specified node.
func (service *Service) Update(ctx context.Context, id storj.NodeID, name string) (err error) {
defer mon.Task()(&ctx)(&err)
return Error.Wrap(service.nodes.UpdateName(ctx, id, name))
}

View File

@@ -80,7 +80,7 @@ type Config struct {
GoogleTagManagerID string `help:"id for google tag manager" default:""`
GeneralRequestURL string `help:"url link to general request page" default:"https://support.tardigrade.io/hc/en-us/requests/new?ticket_form_id=360000379291"`
ProjectLimitsIncreaseRequestURL string `help:"url link to project limit increase request page" default:"https://support.tardigrade.io/hc/en-us/requests/new?ticket_form_id=360000683212"`
GatewayCredentialsRequestURL string `help:"url link for gateway credentials requests" default:""`
GatewayCredentialsRequestURL string `help:"url link for gateway credentials requests" default:"https://auth.tardigradeshare.io"`
RateLimit web.IPRateLimiterConfig
@@ -207,7 +207,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, mail
router.HandleFunc("/password-recovery/", server.passwordRecoveryHandler)
router.HandleFunc("/cancel-password-recovery/", server.cancelPasswordRecoveryHandler)
router.HandleFunc("/usage-report", server.bucketUsageReportHandler)
router.PathPrefix("/static/").Handler(server.gzipMiddleware(http.StripPrefix("/static", fs)))
router.PathPrefix("/static/").Handler(server.brotliMiddleware(http.StripPrefix("/static", fs)))
router.PathPrefix("/").Handler(http.HandlerFunc(server.appHandler))
}
@@ -792,19 +792,19 @@ func (server *Server) seoHandler(w http.ResponseWriter, req *http.Request) {
}
}
// gzipMiddleware is used to gzip static content to minify resources if browser support such decoding.
func (server *Server) gzipMiddleware(fn http.Handler) http.Handler {
// brotliMiddleware is used to serve brotli-compressed static content to minify resources if the browser supports such decoding.
func (server *Server) brotliMiddleware(fn http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "public, max-age=31536000")
w.Header().Set("X-Content-Type-Options", "nosniff")
isGzipSupported := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip")
if !isGzipSupported {
isBrotliSupported := strings.Contains(r.Header.Get("Accept-Encoding"), "br")
if !isBrotliSupported {
fn.ServeHTTP(w, r)
return
}
info, err := os.Stat(server.config.StaticDir + strings.TrimPrefix(r.URL.Path, "/static") + ".gz")
info, err := os.Stat(server.config.StaticDir + strings.TrimPrefix(r.URL.Path, "/static") + ".br")
if err != nil {
fn.ServeHTTP(w, r)
return
@@ -812,13 +812,13 @@ func (server *Server) gzipMiddleware(fn http.Handler) http.Handler {
extension := filepath.Ext(info.Name()[:len(info.Name())-3])
w.Header().Set(contentType, mime.TypeByExtension(extension))
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Content-Encoding", "br")
newRequest := new(http.Request)
*newRequest = *r
newRequest.URL = new(url.URL)
*newRequest.URL = *r.URL
newRequest.URL.Path += ".gz"
newRequest.URL.Path += ".br"
fn.ServeHTTP(w, newRequest)
})
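The middleware only rewrites the request when a sibling .br file already exists on disk, so assets have to be precompressed at build time. A minimal sketch of producing such a file, assuming the third-party github.com/andybalholm/brotli package (this commit does not show how the .br files are generated):

package main

import (
	"io"
	"log"
	"os"

	"github.com/andybalholm/brotli" // assumed dependency, not shown in this commit
)

func main() {
	// Hypothetical pre-compression of one static asset; the file name is a placeholder.
	src, err := os.Open("dist/app.js")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = src.Close() }()

	dst, err := os.Create("dist/app.js.br")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = dst.Close() }()

	w := brotli.NewWriterLevel(dst, brotli.BestCompression)
	if _, err := io.Copy(w, src); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}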

View File

@@ -110,8 +110,8 @@ type RateLimiterConfig struct {
// ProjectLimitConfig is a configuration struct for default project limits.
type ProjectLimitConfig struct {
MaxBuckets int `help:"max bucket count for a project." default:"100"`
DefaultMaxUsage memory.Size `help:"the default storage usage limit" releaseDefault:"50.00GB" devDefault:"200GB"`
DefaultMaxBandwidth memory.Size `help:"the default bandwidth usage limit" releaseDefault:"50.00GB" devDefault:"200GB"`
DefaultMaxUsage memory.Size `help:"the default storage usage limit" default:"500.00GB"`
DefaultMaxBandwidth memory.Size `help:"the default bandwidth usage limit" default:"500.00GB"`
}
// Config is a configuration struct that is everything you need to start a metainfo.

View File

@@ -84,6 +84,28 @@ func (checker *Checker) Run(ctx context.Context) (err error) {
return group.Wait()
}
// getNodesEstimate updates the estimate of the total number of nodes. It is guaranteed
// to return a number greater than 0 when the error is nil.
//
// We can't calculate this upon first starting a Checker, because there may not be any
// nodes yet. We expect that there will be nodes before there are segments, though.
func (checker *Checker) getNodesEstimate(ctx context.Context) (int, error) {
// this should be safe to call frequently; it is an efficient caching lookup.
totalNumNodes, err := checker.nodestate.NumNodes(ctx)
if err != nil {
// We could proceed here by returning the last good value, or by returning a fallback
// constant estimate, like "20000", and we'd probably be fine, but it would be better
// not to have that happen silently for too long. Also, if we can't get this from the
// database, we probably can't modify the injured segments queue, so it won't help to
// proceed with this repair operation.
return 0, err
}
if totalNumNodes == 0 {
return 0, Error.New("segment health is meaningless: there are no nodes")
}
return totalNumNodes, nil
}
// RefreshReliabilityCache forces refreshing node online status cache.
func (checker *Checker) RefreshReliabilityCache(ctx context.Context) error {
return checker.nodestate.Refresh(ctx)
@@ -102,14 +124,15 @@ func (checker *Checker) IdentifyInjuredSegments(ctx context.Context) (err error)
startTime := time.Now()
observer := &checkerObserver{
repairQueue: checker.repairQueue,
irrdb: checker.irrdb,
nodestate: checker.nodestate,
statsCollector: checker.statsCollector,
monStats: aggregateStats{},
repairOverrides: checker.repairOverrides,
nodeFailureRate: checker.nodeFailureRate,
log: checker.logger,
repairQueue: checker.repairQueue,
irrdb: checker.irrdb,
nodestate: checker.nodestate,
statsCollector: checker.statsCollector,
monStats: aggregateStats{},
repairOverrides: checker.repairOverrides,
nodeFailureRate: checker.nodeFailureRate,
getNodesEstimate: checker.getNodesEstimate,
log: checker.logger,
}
err = checker.metaLoop.Join(ctx, observer)
if err != nil {
@@ -186,6 +209,11 @@ func (checker *Checker) updateIrreparableSegmentStatus(ctx context.Context, key
repairThreshold = overrideValue
}
totalNumNodes, err := checker.getNodesEstimate(ctx)
if err != nil {
return Error.New("could not get estimate of total number of nodes: %w", err)
}
// we repair when the number of healthy pieces is less than or equal to the repair threshold and is greater or equal to
// minimum required pieces in redundancy
// except for the case when the repair and success thresholds are the same (a case usually seen during testing)
@@ -193,7 +221,7 @@
// If the segment is suddenly entirely healthy again, we don't need to repair and we don't need to
// keep it in the irreparabledb queue either.
if numHealthy >= int32(redundancy.RequiredShares) && numHealthy <= repairThreshold && numHealthy < int32(redundancy.OptimalShares) {
segmentHealth := float64(numHealthy)
segmentHealth := repair.SegmentHealth(int(numHealthy), int(redundancy.RequiredShares), totalNumNodes, checker.nodeFailureRate)
_, err = checker.repairQueue.Insert(ctx, &internalpb.InjuredSegment{
Path: key,
LostPieces: missingPieces,
@@ -238,14 +266,15 @@ var _ metainfo.Observer = (*checkerObserver)(nil)
//
// architecture: Observer
type checkerObserver struct {
repairQueue queue.RepairQueue
irrdb irreparable.DB
nodestate *ReliabilityCache
statsCollector *statsCollector
monStats aggregateStats // TODO(cam): once we verify statsCollector reports data correctly, remove this
repairOverrides RepairOverridesMap
nodeFailureRate float64
log *zap.Logger
repairQueue queue.RepairQueue
irrdb irreparable.DB
nodestate *ReliabilityCache
statsCollector *statsCollector
monStats aggregateStats // TODO(cam): once we verify statsCollector reports data correctly, remove this
repairOverrides RepairOverridesMap
nodeFailureRate float64
getNodesEstimate func(ctx context.Context) (int, error)
log *zap.Logger
}
func (obs *checkerObserver) getStatsByRS(redundancy storj.RedundancyScheme) *stats {
@@ -285,6 +314,19 @@ func (obs *checkerObserver) RemoteSegment(ctx context.Context, segment *metainfo
return nil
}
pbPieces := make([]*pb.RemotePiece, len(pieces))
for i, piece := range pieces {
pbPieces[i] = &pb.RemotePiece{
PieceNum: int32(piece.Number),
NodeId: piece.StorageNode,
}
}
totalNumNodes, err := obs.getNodesEstimate(ctx)
if err != nil {
return Error.New("could not get estimate of total number of nodes: %w", err)
}
// TODO: update MissingPieces to accept metabase.Pieces
missingPieces, err := obs.nodestate.MissingPieces(ctx, segment.CreationDate, segment.Pieces)
if err != nil {
@@ -305,7 +347,7 @@
required, repairThreshold, successThreshold, _ := obs.loadRedundancy(segment.Redundancy)
segmentHealth := repair.SegmentHealth(numHealthy, required, obs.nodeFailureRate)
segmentHealth := repair.SegmentHealth(numHealthy, required, totalNumNodes, obs.nodeFailureRate)
mon.FloatVal("checker_segment_health").Observe(segmentHealth) //mon:locked
stats.segmentHealth.Observe(segmentHealth)

View File

@@ -39,7 +39,8 @@ func NewReliabilityCache(overlay *overlay.Service, staleness time.Duration) *Rel
}
}
// LastUpdate returns when the cache was last updated.
// LastUpdate returns when the cache was last updated, or the zero value (time.Time{}) if it
// has never yet been updated. LastUpdate() does not trigger an update itself.
func (cache *ReliabilityCache) LastUpdate() time.Time {
if state, ok := cache.state.Load().(*reliabilityState); ok {
return state.created
@@ -47,10 +48,40 @@ func (cache *ReliabilityCache) LastUpdate() time.Time {
return time.Time{}
}
// NumNodes returns the number of online active nodes (as determined by the reliability cache).
// This number is not guaranteed to be consistent with either the nodes database or the
// reliability cache after returning; it is just a best-effort count and should be treated as an
// estimate.
func (cache *ReliabilityCache) NumNodes(ctx context.Context) (numNodes int, err error) {
defer mon.Task()(&ctx)(&err)
state, err := cache.loadFast(ctx, time.Time{})
if err != nil {
return 0, err
}
return len(state.reliable), nil
}
// MissingPieces returns piece indices that are unreliable with the given staleness period.
func (cache *ReliabilityCache) MissingPieces(ctx context.Context, created time.Time, pieces metabase.Pieces) (_ []int32, err error) {
defer mon.Task()(&ctx)(&err)
state, err := cache.loadFast(ctx, created)
if err != nil {
return nil, err
}
var unreliable []int32
for _, piece := range pieces {
if _, ok := state.reliable[piece.StorageNode]; !ok {
unreliable = append(unreliable, int32(piece.Number))
}
}
return unreliable, nil
}
func (cache *ReliabilityCache) loadFast(ctx context.Context, validUpTo time.Time) (_ *reliabilityState, err error) {
defer mon.Task()(&ctx)(&err)
// This code is designed to be very fast in the case where a refresh is not needed: just an
// atomic load from rarely written to bit of shared memory. The general strategy is to first
// read if the state suffices to answer the query. If not (due to it not existing, being
@@ -60,10 +91,10 @@ func (cache *ReliabilityCache) MissingPieces(ctx context.Context, created time.T
// the acquisition. Only then do we refresh and can then proceed answering the query.
state, ok := cache.state.Load().(*reliabilityState)
if !ok || created.After(state.created) || time.Since(state.created) > cache.staleness {
if !ok || validUpTo.After(state.created) || time.Since(state.created) > cache.staleness {
cache.mu.Lock()
state, ok = cache.state.Load().(*reliabilityState)
if !ok || created.After(state.created) || time.Since(state.created) > cache.staleness {
if !ok || validUpTo.After(state.created) || time.Since(state.created) > cache.staleness {
state, err = cache.refreshLocked(ctx)
}
cache.mu.Unlock()
@@ -71,14 +102,7 @@ func (cache *ReliabilityCache) MissingPieces(ctx context.Context, created time.T
return nil, err
}
}
var unreliable []int32
for _, piece := range pieces {
if _, ok := state.reliable[piece.StorageNode]; !ok {
unreliable = append(unreliable, int32(piece.Number))
}
}
return unreliable, nil
return state, nil
}
// Refresh refreshes the cache.
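The fast path in loadFast is the classic double-checked pattern around atomic.Value: one lock-free load, then a re-check under the mutex before refreshing. A generic sketch of the same idea, with all names hypothetical:

package cachesketch

import (
	"sync"
	"sync/atomic"
	"time"
)

type snapshot struct {
	created time.Time
	data    map[string]struct{}
}

type staleCache struct {
	mu        sync.Mutex
	state     atomic.Value // holds *snapshot
	staleness time.Duration
}

func (c *staleCache) load(refresh func() *snapshot) *snapshot {
	// Fast path: a single atomic load, no locking.
	if s, ok := c.state.Load().(*snapshot); ok && time.Since(s.created) <= c.staleness {
		return s
	}
	// Slow path: take the lock and re-check, because another goroutine may
	// have refreshed the state while we were waiting on the mutex.
	c.mu.Lock()
	defer c.mu.Unlock()
	if s, ok := c.state.Load().(*snapshot); ok && time.Since(s.created) <= c.staleness {
		return s
	}
	s := refresh()
	c.state.Store(s)
	return s
}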

View File

@@ -3,144 +3,53 @@
package repair
import (
"math"
)
import "math"
// SegmentHealth returns a value corresponding to the health of a segment
// in the repair queue. Lower health segments should be repaired first.
func SegmentHealth(numHealthy, minPieces int, failureRate float64) float64 {
return 1.0 / SegmentDanger(numHealthy, minPieces, failureRate)
// SegmentHealth returns a value corresponding to the health of a segment in the
// repair queue. Lower health segments should be repaired first.
//
// This calculation purports to find the number of iterations for which a
// segment can be expected to survive, with the given failureRate. The number of
// iterations for the segment to survive (X) can be modeled with the negative
// binomial distribution, with the number of pieces that must be lost as the
// success threshold r, and the chance of losing a single piece in a round as
// the trial success probability p.
//
// First, we calculate the expected number of iterations for a segment to
// survive if we were to lose exactly one node every iteration:
//
// r = numHealthy - minPieces + 1
// p = (totalNodes - numHealthy) / totalNodes
// X ~ NB(r, p)
//
// Then we take the mean of that distribution to use as our expected value,
// which is pr/(1-p).
//
// Finally, to get away from the "one node per iteration" simplification, we
// just scale the magnitude of the iterations in the model so that there really
// is one node being lost. For example, if our failureRate and totalNodes imply
// a churn rate of 3 nodes per day, we just take 1/3 of a day and call that an
// "iteration" for purposes of the model. To convert iterations in the model to
// days, we divide the mean of the negative binomial distribution (X, above) by
// the number of nodes that we estimate will churn in one day.
func SegmentHealth(numHealthy, minPieces, totalNodes int, failureRate float64) float64 {
churnPerRound := float64(totalNodes) * failureRate
if churnPerRound < minChurnPerRound {
// we artificially limit churnPerRound from going too low in cases
// where there are not many nodes, so that health values do not
// start to approach the floating point maximum
churnPerRound = minChurnPerRound
}
p := float64(totalNodes-numHealthy) / float64(totalNodes)
if p == 1.0 {
// floating point precision is insufficient to represent the difference
// from p to 1. there are too many nodes for this model, or else
// numHealthy is 0 somehow. we can't proceed with the normal calculation
// or we will divide by zero.
return math.Inf(1)
}
mean1 := float64(numHealthy-minPieces+1) * p / (1 - p)
return mean1 / churnPerRound
}
// SegmentDanger returns the chance of a segment with the given minPieces
// and the given number of healthy pieces of being lost in the next time
// period.
//
// It assumes:
//
// * Nodes fail at the given failureRate (i.e., each node has a failureRate
// chance of going offline within the next time period).
// * Node failures are entirely independent. Obviously this is not the case,
// because many nodes may be operated by a single entity or share network
// infrastructure, in which case their failures would be correlated. But we
// can't easily model that, so our best hope is to try to avoid putting
// pieces for the same segment on related nodes to maximize failure
// independence.
//
// (The "time period" we are talking about here could be anything. The returned
// danger value will be given in terms of whatever time period was used to
// determine failureRate. If it simplifies things, you can think of the time
// period as "one repair worker iteration".)
//
// If those things are true, then the number of nodes holding this segment
// that will go offline follows the Binomial distribution:
//
// X ~ Binom(numHealthy, failureRate)
//
// A segment is lost if the number of nodes that go offline is higher than
// (numHealthy - minPieces). So we want to find
//
// Pr[X > (numHealthy - minPieces)]
//
// If we invert the logic here, we can use the standard CDF for the binomial
// distribution.
//
// Pr[X > (numHealthy - minPieces)] = 1 - Pr[X <= (numHealthy - minPieces)]
//
// And that gives us the danger value.
func SegmentDanger(numHealthy, minPieces int, failureRate float64) float64 {
return 1.0 - binomialCDF(float64(numHealthy-minPieces), float64(numHealthy), failureRate)
}
// math.Lgamma without the returned sign parameter; it's unneeded here.
func lnGamma(x float64) float64 {
lg, _ := math.Lgamma(x)
return lg
}
// The following functions are based on code from
// Numerical Recipes in C, Second Edition, Section 6.4 (pp. 227-228).
// betaI calculates the incomplete beta function I_x(a, b).
func betaI(a, b, x float64) float64 {
if x < 0.0 || x > 1.0 {
return math.NaN()
}
bt := 0.0
if x > 0.0 && x < 1.0 {
// factors in front of the continued function
bt = math.Exp(lnGamma(a+b) - lnGamma(a) - lnGamma(b) + a*math.Log(x) + b*math.Log(1.0-x))
}
if x < (a+1.0)/(a+b+2.0) {
// use continued fraction directly
return bt * betaCF(a, b, x) / a
}
// use continued fraction after making the symmetry transformation
return 1.0 - bt*betaCF(b, a, 1.0-x)/b
}
const (
// unlikely to go this far, as betaCF is expected to converge quickly for
// typical values.
maxIter = 100
// betaI outputs will be accurate to within this amount.
epsilon = 1.0e-14
)
// betaCF evaluates the continued fraction for the incomplete beta function
// by a modified Lentz's method.
func betaCF(a, b, x float64) float64 {
avoidZero := func(f float64) float64 {
if math.Abs(f) < math.SmallestNonzeroFloat64 {
return math.SmallestNonzeroFloat64
}
return f
}
qab := a + b
qap := a + 1.0
qam := a - 1.0
c := 1.0
d := 1.0 / avoidZero(1.0-qab*x/qap)
h := d
for m := 1; m <= maxIter; m++ {
m := float64(m)
m2 := 2.0 * m
aa := m * (b - m) * x / ((qam + m2) * (a + m2))
// one step (the even one) of the recurrence
d = 1.0 / avoidZero(1.0+aa*d)
c = avoidZero(1.0 + aa/c)
h *= d * c
aa = -(a + m) * (qab + m) * x / ((a + m2) * (qap + m2))
// next step of the recurrence (the odd one)
d = 1.0 / avoidZero(1.0+aa*d)
c = avoidZero(1.0 + aa/c)
del := d * c
h *= del
if math.Abs(del-1.0) < epsilon {
return h
}
}
// a or b too big, or maxIter too small
return math.NaN()
}
// binomialCDF evaluates the CDF of the binomial distribution Binom(n, p) at k.
// This is done using (1-p)**(n-k) when k is 0, or with the incomplete beta
// function otherwise.
func binomialCDF(k, n, p float64) float64 {
k = math.Floor(k)
if k < 0.0 || n < k {
return math.NaN()
}
if k == n {
return 1.0
}
if k == 0 {
return math.Pow(1.0-p, n-k)
}
return betaI(n-k, k+1.0, 1.0-p)
}
const minChurnPerRound = 1e-10
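To make the formula concrete, here is a worked instance with arbitrary example numbers (numHealthy = 52, minPieces = 29, totalNodes = 10000, failureRate = 0.00005435):

\[ r = 52 - 29 + 1 = 24, \qquad p = \frac{10000 - 52}{10000} = 0.9948, \]
\[ \text{churnPerRound} = 10000 \times 0.00005435 = 0.5435, \]
\[ \text{health} = \frac{r \cdot p / (1 - p)}{\text{churnPerRound}} = \frac{24 \times 0.9948 / 0.0052}{0.5435} \approx 8.4 \times 10^{3}. \]

Lower values sort earlier in the repair queue, so a segment with less slack (smaller r) yields a smaller health value and is repaired sooner, which matches the orderings asserted by the updated tests below.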

View File

@@ -10,59 +10,45 @@ import (
"github.com/stretchr/testify/assert"
)
func TestBetaI(t *testing.T) {
// check a few places where betaI has some easily representable values
assert.Equal(t, 0.0, betaI(0.5, 5, 0))
assert.Equal(t, 0.0, betaI(1, 3, 0))
assert.Equal(t, 0.0, betaI(8, 10, 0))
assert.Equal(t, 0.0, betaI(8, 10, 0))
assert.InDelta(t, 0.5, betaI(0.5, 0.5, 0.5), epsilon)
assert.InDelta(t, 1.0/3.0, betaI(0.5, 0.5, 0.25), epsilon)
assert.InDelta(t, 0.488, betaI(1, 3, 0.2), epsilon)
}
func BenchmarkBetaI(b *testing.B) {
for i := 0; i < b.N; i++ {
assert.InDelta(b, 1.0/3.0, betaI(0.5, 0.5, 0.25), epsilon)
}
}
func TestSegmentDanger(t *testing.T) {
const failureRate = 0.01
assert.Greater(t,
SegmentDanger(11, 10, failureRate),
SegmentDanger(10, 5, failureRate))
assert.Greater(t,
SegmentDanger(11, 10, failureRate),
SegmentDanger(10, 9, failureRate))
assert.Greater(t,
SegmentDanger(10, 10, failureRate),
SegmentDanger(9, 9, failureRate))
assert.Less(t,
SegmentDanger(11, 10, failureRate),
SegmentDanger(12, 11, failureRate))
}
func TestSegmentHealth(t *testing.T) {
const failureRate = 0.01
assert.Less(t,
SegmentHealth(11, 10, failureRate),
SegmentHealth(10, 5, failureRate))
SegmentHealth(11, 10, 10000, failureRate),
SegmentHealth(10, 5, 10000, failureRate))
assert.Less(t,
SegmentHealth(11, 10, failureRate),
SegmentHealth(10, 9, failureRate))
SegmentHealth(11, 10, 10000, failureRate),
SegmentHealth(10, 9, 10000, failureRate))
assert.Less(t,
SegmentHealth(10, 10, failureRate),
SegmentHealth(9, 9, failureRate))
SegmentHealth(10, 10, 10000, failureRate),
SegmentHealth(9, 9, 10000, failureRate))
assert.Greater(t,
SegmentHealth(11, 10, failureRate),
SegmentHealth(12, 11, failureRate))
SegmentHealth(11, 10, 10000, failureRate),
SegmentHealth(12, 11, 10000, failureRate))
assert.Greater(t,
SegmentHealth(13, 10, failureRate),
SegmentHealth(12, 10, failureRate))
SegmentHealth(13, 10, 10000, failureRate),
SegmentHealth(12, 10, 10000, failureRate))
}
func TestSegmentHealthForDecayedSegment(t *testing.T) {
const failureRate = 0.01
assert.True(t, math.IsNaN(SegmentHealth(9, 10, failureRate)))
got := SegmentHealth(9, 10, 10000, failureRate)
assert.Equal(t, float64(0), got)
}
func TestHighHealthAndLowFailureRate(t *testing.T) {
const failureRate = 0.00005435
assert.Less(t,
SegmentHealth(36, 35, 10000, failureRate), math.Inf(1))
assert.Greater(t,
SegmentHealth(36, 35, 10000, failureRate),
SegmentHealth(35, 35, 10000, failureRate))
assert.Less(t,
SegmentHealth(60, 29, 10000, failureRate), math.Inf(1))
assert.Greater(t,
SegmentHealth(61, 29, 10000, failureRate),
SegmentHealth(60, 29, 10000, failureRate))
assert.Greater(t,
SegmentHealth(11, 10, 10000, failureRate),
SegmentHealth(39, 34, 10000, failureRate))
}

View File

@@ -252,17 +252,12 @@ model injuredsegment (
field data blob
field attempted timestamp (updatable, nullable)
field updated_at timestamp ( updatable, default current_timestamp )
field num_healthy_pieces int (default 52)
field segment_health float64 (default 1)
index (
fields attempted
)
index (
fields num_healthy_pieces
)
index (
fields segment_health
)

View File

@@ -428,7 +428,6 @@ CREATE TABLE injuredsegments (
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
num_healthy_pieces integer NOT NULL DEFAULT 52,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
@@ -768,7 +767,6 @@ CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
@@ -986,7 +984,6 @@ CREATE TABLE injuredsegments (
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
num_healthy_pieces integer NOT NULL DEFAULT 52,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
@@ -1326,7 +1323,6 @@ CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
@@ -3010,21 +3006,19 @@ func (GracefulExitTransferQueue_OrderLimitSendCount_Field) _Column() string {
}
type Injuredsegment struct {
Path []byte
Data []byte
Attempted *time.Time
UpdatedAt time.Time
NumHealthyPieces int
SegmentHealth float64
Path []byte
Data []byte
Attempted *time.Time
UpdatedAt time.Time
SegmentHealth float64
}
func (Injuredsegment) _Table() string { return "injuredsegments" }
type Injuredsegment_Create_Fields struct {
Attempted Injuredsegment_Attempted_Field
UpdatedAt Injuredsegment_UpdatedAt_Field
NumHealthyPieces Injuredsegment_NumHealthyPieces_Field
SegmentHealth Injuredsegment_SegmentHealth_Field
Attempted Injuredsegment_Attempted_Field
UpdatedAt Injuredsegment_UpdatedAt_Field
SegmentHealth Injuredsegment_SegmentHealth_Field
}
type Injuredsegment_Update_Fields struct {
@@ -3121,25 +3115,6 @@ func (f Injuredsegment_UpdatedAt_Field) value() interface{} {
func (Injuredsegment_UpdatedAt_Field) _Column() string { return "updated_at" }
type Injuredsegment_NumHealthyPieces_Field struct {
_set bool
_null bool
_value int
}
func Injuredsegment_NumHealthyPieces(v int) Injuredsegment_NumHealthyPieces_Field {
return Injuredsegment_NumHealthyPieces_Field{_set: true, _value: v}
}
func (f Injuredsegment_NumHealthyPieces_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (Injuredsegment_NumHealthyPieces_Field) _Column() string { return "num_healthy_pieces" }
type Injuredsegment_SegmentHealth_Field struct {
_set bool
_null bool

View File

@@ -108,7 +108,6 @@ CREATE TABLE injuredsegments (
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
num_healthy_pieces integer NOT NULL DEFAULT 52,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
@@ -448,7 +447,6 @@ CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );

View File

@@ -108,7 +108,6 @@ CREATE TABLE injuredsegments (
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
num_healthy_pieces integer NOT NULL DEFAULT 52,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
@@ -448,7 +447,6 @@ CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );

View File

@@ -1123,6 +1123,14 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
return nil
}),
},
{
DB: &db.migrationDB,
Description: "drop num_healthy_pieces column from injuredsegments",
Version: 134,
Action: migrate.SQL{
`ALTER TABLE injuredsegments DROP COLUMN num_healthy_pieces;`,
},
},
},
}
}

View File

@@ -29,7 +29,7 @@ func (r *repairQueue) Insert(ctx context.Context, seg *internalpb.InjuredSegment
// insert if not exists, or update segment health if it does exist
var query string
// we want to insert the segment if it is not in the queue, but update the number of healthy pieces if it already is in the queue
// we want to insert the segment if it is not in the queue, but update the segment health if it already is in the queue
// we also want to know if the result was an insert or an update - this is the reasoning for the xmax section of the postgres query
// and the separate cockroach query (which the xmax trick does not work for)
switch r.db.implementation {
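For reference, the xmax trick mentioned in the comment above usually takes the following shape; this is a sketch against the injuredsegments schema in this commit, not the literal query used here:

package queue // illustrative package name

// upsertSegment is a hypothetical upsert that reports insert-vs-update in one
// round trip. In PostgreSQL, xmax is 0 on a freshly inserted row and nonzero
// on a row rewritten by the DO UPDATE branch, so scanning the returned boolean
// tells the caller whether the segment was newly queued. CockroachDB does not
// support this, hence the separate query mentioned above.
const upsertSegment = `
	INSERT INTO injuredsegments ( path, data, segment_health )
	VALUES ( $1, $2, $3 )
	ON CONFLICT ( path )
	DO UPDATE SET data = EXCLUDED.data, segment_health = EXCLUDED.segment_health
	RETURNING ( xmax = 0 ) AS inserted
`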

View File

@@ -0,0 +1,575 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
node_id bytea NOT NULL,
history bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
storage_node_id bytea NOT NULL,
serial_number bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL DEFAULT 0,
pieces_failed bigint NOT NULL DEFAULT 0,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL DEFAULT 0,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
api_version integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL DEFAULT 0,
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
serial_number bytea NOT NULL,
action integer NOT NULL,
settled bigint NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint,
bandwidth_limit bigint,
rate_limit integer,
max_buckets integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
project_id bytea NOT NULL,
interval_month date NOT NULL,
egress_allocated bigint NOT NULL,
PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
expires_at timestamp with time zone NOT NULL,
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
action integer NOT NULL,
serial_number bytea NOT NULL,
settled bigint NOT NULL,
observed_at timestamp with time zone NOT NULL,
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE revocations (
revoked bytea NOT NULL,
api_key_id bytea NOT NULL,
PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text NOT NULL,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);


@ -9,5 +9,11 @@ mkdir -p release/$TAG/wasm/
# Copy wasm javascript to match the go version
cp "$(go env GOROOT)/misc/wasm/wasm_exec.js" release/$TAG/wasm/
# Compress wasm javascript using brotli
brotli -k release/$TAG/wasm/wasm_exec.js
# Build wasm code
exec go build -o release/$TAG/wasm/access.wasm storj.io/storj/satellite/console/wasm
go build -o release/$TAG/wasm/access.wasm storj.io/storj/satellite/console/wasm
# Compress wasm code using brotli
brotli -k release/$TAG/wasm/access.wasm
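Two notes on the release.sh hunk above: the removed `exec go build` replaced the shell process, so nothing after it could ever run; dropping `exec` is what lets the new brotli step compress access.wasm after the build, producing the `.br` artifacts the satellite Dockerfile now copies. For orientation, a hedged TypeScript sketch of a classic (non-module) worker that consumes these artifacts; the paths, the `Go` shim from wasm_exec.js, and the 'configured' handshake are assumptions based on other files in this diff, not the actual worker source:
// accessGrant.worker.ts: a sketch only, not the worker shipped in this commit.
// Assumes the satellite serves /static/wasm/wasm_exec.js and
// /static/wasm/access.wasm (the .br variants are negotiated transparently
// by the browser via Content-Encoding).
declare const Go: any;                      // constructor defined by wasm_exec.js
importScripts('/static/wasm/wasm_exec.js'); // load the Go wasm runtime shim

const go = new Go();
WebAssembly.instantiateStreaming(fetch('/static/wasm/access.wasm'), go.importObject)
    .then((result) => {
        go.run(result.instance);            // start the compiled module
        postMessage('configured');          // readiness signal the store module checks for
    })
    .catch((error) => postMessage({ error: error.message }));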


@ -89,7 +89,7 @@ compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
# console.frame-ancestors: tardigrade.io
# url link for gateway credentials requests
# console.gateway-credentials-request-url: ""
# console.gateway-credentials-request-url: https://auth.tardigradeshare.io
# url link to general request page
# console.general-request-url: https://support.tardigrade.io/hc/en-us/requests/new?ticket_form_id=360000379291
@ -377,10 +377,10 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# metainfo.piece-deletion.request-timeout: 1m0s
# the default bandwidth usage limit
# metainfo.project-limits.default-max-bandwidth: 50.00 GB
# metainfo.project-limits.default-max-bandwidth: 500.00 GB
# the default storage usage limit
# metainfo.project-limits.default-max-usage: 50.00 GB
# metainfo.project-limits.default-max-usage: 500.00 GB
# max bucket count for a project.
# metainfo.project-limits.max-buckets: 100

web/multinode/.gitignore vendored Normal file

@ -0,0 +1,26 @@
.DS_Store
node_modules
dist
coverage
temp
# local env files
.env.local
.env.*.local
# Log files
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Allow images
!*.svg
# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw*


@ -0,0 +1,6 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
module.exports = {
presets: [ [ "@vue/app", { useBuiltIns: "entry" } ] ]
};

web/multinode/index.html Normal file

@ -0,0 +1,16 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="apple-touch-icon" href="" type="image/x-icon">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<meta name="viewport" content="width=device-width, initial-scale=1, viewport-fit=cover">
<meta name="description" content="Multinode Dashboard">
<link rel="shortcut icon" href="" type="image/x-icon">
<title>Multinode Dashboard</title>
</head>
<body>
<div id="app"></div>
</body>
</html>

web/multinode/package-lock.json generated Normal file

File diff suppressed because it is too large

@ -0,0 +1,73 @@
{
"name": "multinode",
"version": "0.0.1",
"scripts": {
"serve": "vue-cli-service serve",
"lint": "vue-cli-service lint && stylelint '**/*.{vue,scss}' --fix",
"build": "vue-cli-service build",
"debug": "vue-cli-service build --mode development",
"test": "vue-cli-service test:unit"
},
"dependencies": {
"vue": "2.6.11",
"vue-class-component": "7.2.6",
"vue-property-decorator": "9.1.2"
},
"devDependencies": {
"@babel/core": "7.12.10",
"@babel/plugin-proposal-object-rest-spread": "7.12.1",
"@vue/cli-plugin-babel": "4.5.9",
"@vue/cli-plugin-typescript": "4.5.9",
"@vue/cli-service": "4.5.9",
"babel-core": "6.26.3",
"core-js": "3.8.1",
"stylelint": "13.8.0",
"stylelint-config-standard": "20.0.0",
"stylelint-scss": "3.18.0",
"stylelint-webpack-plugin": "2.1.1",
"tslint": "6.1.3",
"tslint-consistent-codestyle": "1.16.0",
"tslint-loader": "3.5.4",
"typescript": "3.7.4",
"vue-template-compiler": "2.6.11",
"vue-tslint": "0.3.2",
"vue-tslint-loader": "3.5.6",
"webpack": "4.41.5"
},
"stylelint": {
"plugins": [
"stylelint-scss"
],
"extends": "stylelint-config-standard",
"rules": {
"indentation": 4,
"string-quotes": "single",
"no-duplicate-selectors": true,
"selector-max-attribute": 1,
"selector-combinator-space-after": "always",
"selector-attribute-operator-space-before": "never",
"selector-attribute-operator-space-after": "never",
"selector-attribute-brackets-space-inside": "never",
"declaration-block-trailing-semicolon": "always",
"declaration-colon-space-before": "never",
"declaration-colon-space-after": "always",
"number-leading-zero": "always",
"function-url-quotes": "always",
"font-family-name-quotes": "always-unless-keyword",
"comment-whitespace-inside": "always",
"comment-empty-line-before": "always",
"rule-empty-line-before": "always-multi-line",
"selector-pseudo-element-colon-notation": "single",
"selector-pseudo-class-parentheses-space-inside": "never",
"selector-max-type": 1,
"font-family-no-missing-generic-family-keyword": true,
"at-rule-no-unknown": null,
"scss/at-rule-no-unknown": true,
"media-feature-range-operator-space-before": "always",
"media-feature-range-operator-space-after": "always",
"media-feature-parentheses-space-inside": "never",
"media-feature-colon-space-before": "never",
"media-feature-colon-space-after": "always"
}
}
}


@ -0,0 +1,14 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div id="app">
</div>
</template>
<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';
@Component
export default class App extends Vue {}
</script>

web/multinode/src/main.ts Normal file

@ -0,0 +1,14 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
import Vue from 'vue';
import App from '@/app/App.vue';
Vue.config.productionTip = false;
new Vue({
// TODO: add router,
render: (h) => h(App),
// TODO: add store,
}).$mount('#app');
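A hypothetical completion of the two TODOs above; '@/router' and '@/store' do not exist in this commit and are assumed to export a router instance and a Vuex store:
// sketch only: module paths and export names are assumptions
import Vue from 'vue';
import App from '@/app/App.vue';
import { router } from '@/router';
import { store } from '@/store';

Vue.config.productionTip = false;

new Vue({
    router,
    store,
    render: (h) => h(App),
}).$mount('#app');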


@ -0,0 +1,39 @@
{
"compilerOptions": {
"target": "esnext",
"module": "esnext",
"strict": true,
"noImplicitAny": false,
"jsx": "preserve",
"importHelpers": true,
"moduleResolution": "node",
"experimentalDecorators": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"sourceMap": true,
"baseUrl": ".",
"strictPropertyInitialization": false,
"types": [
"webpack-env",
],
"paths": {
"@/*": [
"src/*"
]
},
"lib": [
"esnext",
"dom",
"dom.iterable",
"scripthost"
]
},
"include": [
"src/**/*.ts",
"src/**/*.vue",
"tests/**/*.ts"
],
"exclude": [
"node_modules"
]
}
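A one-line illustration of the "@/*" path mapping above; it pairs with the `config.resolve.alias` entry in vue.config.js further down:
// with the "@/*" mapping plus the matching webpack alias, imports are rooted
// at src/ rather than written as relative paths
import App from '@/app/App.vue'; // resolves to web/multinode/src/app/App.vue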

web/multinode/tslint.json Normal file

@ -0,0 +1,97 @@
{
"defaultSeverity": "warning",
"rulesDirectory": [
"tslint-consistent-codestyle"
],
"linterOptions": {
"exclude": [
"node_modules/**"
]
},
"rules": {
"function-constructor": true,
"align": [true, "parameters", "statements"],
"array-type": [true, "array-simple"],
"arrow-return-shorthand": true,
"class-name": true,
"comment-format": [true, "check-space"],
"comment-type": [true, "doc", "singleline"],
"curly": [true, "ignore-same-line"],
"early-exit": true,
"eofline": true,
"indent": [true, "spaces", 4],
"interface-name": false,
"import-spacing": true,
"no-async-without-await": true,
"no-boolean-literal-compare": true,
"no-conditional-assignment": true,
"no-consecutive-blank-lines": [true, 1],
"no-console": [true, "log"],
"no-default-export": false,
"no-duplicate-imports": true,
"no-duplicate-super": true,
"no-duplicate-switch-case": true,
"no-empty": true,
"no-eval": true,
"no-invalid-template-strings": true,
"no-invalid-this": true,
"no-misused-new": true,
"no-static-this": true,
"no-trailing-whitespace": true,
"no-var-keyword": true,
"newline-before-return": true,
"object-literal-sort-keys": false,
"one-variable-per-declaration": [true, "ignore-for-loop"],
"ordered-imports": [
true, {
"import-sources-order": "case-insensitive",
"named-imports-order": "case-insensitive",
"grouped-imports": true,
"groups": [{
"name": "external",
"match": "^[A-Za-z]",
"order": 1
}, {
"name": "internal components",
"match": "^@/app/components",
"order": 2
}, {
"name": "internal images",
"match": "^@/../static/images",
"order": 3
}, {
"name": "internal else",
"match": "^@",
"order": 4
}]
}],
"prefer-const": true,
"prefer-method-signature": true,
"prefer-switch": [true, {"min-cases": 2}],
"prefer-while": true,
"quotemark": [true, "single", "avoid-escape"],
"semicolon": [true, "always"],
"space-within-parens": 0,
"static-this": true,
"trailing-comma": [true, {"multiline": "always", "singleline": "never"}],
"triple-equals": true,
"typedef": [
true,
"property-declaration"
],
"type-literal-delimiter": true,
"unnecessary-else": true,
"whitespace": [
true,
"check-branch",
"check-decl",
"check-module",
"check-operator",
"check-preblock",
"check-rest-spread",
"check-separator",
"check-type",
"check-type-operator"
]
}
}
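For reference, an import block that satisfies the "ordered-imports" grouping configured above; the component and asset names here are hypothetical:
// group 1: external packages
import Vue from 'vue';

// group 2: internal components (hypothetical path)
import VButton from '@/app/components/VButton.vue';

// group 3: internal images (hypothetical asset)
import BackIcon from '@/../static/images/back.svg';

// group 4: everything else internal
import { RouteConfig } from '@/router';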


@ -0,0 +1,33 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
const path = require('path');
const StyleLintPlugin = require('stylelint-webpack-plugin');
module.exports = {
publicPath: "/static/dist",
productionSourceMap: false,
parallel: true,
configureWebpack: {
plugins: [
new StyleLintPlugin({
files: ['**/*.{vue,sss,less,scss,sass}'],
emitWarning: true,
})
],
},
chainWebpack: config => {
config.output.chunkFilename(`js/vendors_[hash].js`);
config.output.filename(`js/app_[hash].js`);
config.resolve.alias
.set('@', path.resolve('src'));
config
.plugin('html')
.tap(args => {
args[0].template = './index.html';
return args
});
}
};


@ -17684,6 +17684,15 @@
"errno": "~0.1.7"
}
},
"worker-plugin": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/worker-plugin/-/worker-plugin-5.0.0.tgz",
"integrity": "sha512-AXMUstURCxDD6yGam2r4E34aJg6kW85IiaeX72hi+I1cxyaMUtrvVY6sbfpGKAj5e7f68Acl62BjQF5aOOx2IQ==",
"dev": true,
"requires": {
"loader-utils": "^1.1.0"
}
},
"worker-rpc": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/worker-rpc/-/worker-rpc-0.1.1.tgz",


@ -58,7 +58,8 @@
"vue-template-compiler": "2.6.12",
"vue-tslint": "0.3.2",
"vue-tslint-loader": "3.5.6",
"webpack": "4.44.1"
"webpack": "4.44.1",
"worker-plugin": "5.0.0"
},
"postcss": {
"plugins": {

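worker-plugin is what lets webpack 4 bundle the `new Worker(..., { type: 'module' })` call that the access-grants store module below now makes. The matching vue.config.js registration is not part of this diff; a sketch of what it would presumably look like:
// vue.config.js (sketch): registration assumed, not shown in this commit
const WorkerPlugin = require('worker-plugin');

module.exports = {
    configureWebpack: {
        plugins: [
            // rewrites `new Worker('...', { type: 'module' })` call sites
            // into separately bundled worker chunks
            new WorkerPlugin(),
        ],
    },
};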

@ -1,7 +1,7 @@
<!--Copyright (C) 2020 Storj Labs, Inc.-->
<!--See LICENSE for copying information.-->
<div class="cli-container">
<div class="cli-container" :class="{ 'border-radius': isOnboardingTour }">
<BackIcon class="cli-container__back-icon" @click="onBackClick"/>
<h1 class="cli-container__title">Create Access Grant in CLI</h1>
<p class="cli-container__sub-title">
@ -10,7 +10,7 @@
<div class="cli-container__token-area">
<p class="cli-container__token-area__label">Token</p>
<div class="cli-container__token-area__container">
<p class="cli-container__token-area__container__token">{{ key }}</p>
<p class="cli-container__token-area__container__token">{{ restrictedKey }}</p>
<VButton
class="cli-container__token-area__container__button"
label="Copy"


@ -89,3 +89,7 @@
margin: 16px 0;
}
}
.border-radius {
border-radius: 6px;
}


@ -20,17 +20,21 @@ import { RouteConfig } from '@/router';
})
export default class CLIStep extends Vue {
public key: string = '';
public restrictedKey: string = '';
/**
* Lifecycle hook after initial render.
* Sets local key from props value.
*/
public mounted(): void {
if (!this.$route.params.key) {
if (!this.$route.params.key && !this.$route.params.restrictedKey) {
this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
return;
}
this.key = this.$route.params.key;
this.restrictedKey = this.$route.params.restrictedKey;
}
/**
@ -38,6 +42,17 @@ export default class CLIStep extends Vue {
* Redirects to previous step.
*/
public onBackClick(): void {
if (this.isOnboardingTour) {
this.$router.push({
name: RouteConfig.OnboardingTour.with(RouteConfig.AccessGrant.with(RouteConfig.AccessGrantPermissions)).name,
params: {
key: this.key,
},
});
return;
}
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.PermissionsStep)).name,
params: {
@ -51,6 +66,12 @@ export default class CLIStep extends Vue {
* Redirects to upload step.
*/
public onDoneClick(): void {
if (this.isOnboardingTour) {
this.$router.push(RouteConfig.ProjectDashboard.path);
return;
}
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.UploadStep)).name,
params: {
@ -67,6 +88,13 @@ export default class CLIStep extends Vue {
this.$copyText(this.key);
this.$notify.success('Token was copied successfully');
}
/**
* Indicates if current route is onboarding tour.
*/
public get isOnboardingTour(): boolean {
return this.$route.path.includes(RouteConfig.OnboardingTour.path);
}
}
</script>


@ -87,6 +87,7 @@ import { MetaUtils } from '@/utils/meta';
})
export default class CreatePassphraseStep extends Vue {
private key: string = '';
private restrictedKey: string = '';
private access: string = '';
private worker: Worker;
private isLoading: boolean = true;
@ -101,13 +102,38 @@ export default class CreatePassphraseStep extends Vue {
* Sets local key from props value.
*/
public async mounted(): Promise<void> {
if (!this.$route.params.key) {
if (!this.$route.params.key && !this.$route.params.restrictedKey) {
await this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
return;
}
this.key = this.$route.params.key;
this.restrictedKey = this.$route.params.restrictedKey;
this.passphrase = bip39.generateMnemonic();
this.worker = await new Worker('/static/static/wasm/webWorker.js');
this.setWorker();
this.isLoading = false;
}
/**
* Changes state to generate passphrase.
*/
public onChooseGenerate(): void {
if (this.passphrase && this.isGenerateState) return;
this.passphrase = bip39.generateMnemonic();
this.isCreateState = false;
this.isGenerateState = true;
}
/**
* Sets local worker with worker instantiated in store.
* Also sets worker's onmessage and onerror logic.
*/
public setWorker(): void {
this.worker = this.$store.state.accessGrantsModule.accessGrantsWebWorker;
this.worker.onmessage = (event: MessageEvent) => {
const data = event.data;
if (data.error) {
@ -123,19 +149,6 @@ export default class CreatePassphraseStep extends Vue {
this.worker.onerror = (error: ErrorEvent) => {
this.$notify.error(error.message);
};
this.isLoading = false;
}
/**
* Changes state to generate passphrase.
*/
public onChooseGenerate(): void {
if (this.passphrase && this.isGenerateState) return;
this.passphrase = bip39.generateMnemonic();
this.isCreateState = false;
this.isGenerateState = true;
}
/**
@ -185,7 +198,7 @@ export default class CreatePassphraseStep extends Vue {
this.worker.postMessage({
'type': 'GenerateAccess',
'apiKey': this.key,
'apiKey': this.restrictedKey,
'passphrase': this.passphrase,
'projectID': this.$store.getters.selectedProject.id,
'satelliteName': satelliteName,


@ -2,7 +2,7 @@
// See LICENSE for copying information.
<template>
<div class="enter-passphrase">
<div class="enter-passphrase" :class="{ 'border-radius': isOnboardingTour }">
<BackIcon class="enter-passphrase__back-icon" @click="onBackClick"/>
<h1 class="enter-passphrase__title">Enter Encryption Passphrase</h1>
<p class="enter-passphrase__sub-title">Enter the passphrase you most recently generated for Access Grants</p>
@ -44,6 +44,7 @@ import { MetaUtils } from '@/utils/meta';
})
export default class EnterPassphraseStep extends Vue {
private key: string = '';
private restrictedKey: string = '';
private access: string = '';
private worker: Worker;
private isLoading: boolean = true;
@ -56,27 +57,16 @@ export default class EnterPassphraseStep extends Vue {
* Sets local key from props value.
*/
public async mounted(): Promise<void> {
if (!this.$route.params.key) {
if (!this.$route.params.key && !this.$route.params.restrictedKey) {
await this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
return;
}
this.key = this.$route.params.key;
this.worker = await new Worker('/static/static/wasm/webWorker.js');
this.worker.onmessage = (event: MessageEvent) => {
const data = event.data;
if (data.error) {
this.$notify.error(data.error);
this.restrictedKey = this.$route.params.restrictedKey;
return;
}
this.access = data.value;
this.$notify.success('Access Grant was generated successfully');
};
this.worker.onerror = (error: ErrorEvent) => {
this.$notify.error(error.message);
};
this.setWorker();
this.isLoading = false;
}
@ -107,7 +97,7 @@ export default class EnterPassphraseStep extends Vue {
this.worker.postMessage({
'type': 'GenerateAccess',
'apiKey': this.key,
'apiKey': this.restrictedKey,
'passphrase': this.passphrase,
'projectID': this.$store.getters.selectedProject.id,
'satelliteName': satelliteName,
@ -127,6 +117,29 @@ export default class EnterPassphraseStep extends Vue {
}, 1000);
}
/**
* Sets local worker with worker instantiated in store.
* Also sets worker's onmessage and onerror logic.
*/
public setWorker(): void {
this.worker = this.$store.state.accessGrantsModule.accessGrantsWebWorker;
this.worker.onmessage = (event: MessageEvent) => {
const data = event.data;
if (data.error) {
this.$notify.error(data.error);
return;
}
this.access = data.value;
this.$notify.success('Access Grant was generated successfully');
};
this.worker.onerror = (error: ErrorEvent) => {
this.$notify.error(error.message);
};
}
/**
* Holds on back button click logic.
* Redirects to previous step.
@ -191,5 +204,9 @@ export default class EnterPassphraseStep extends Vue {
margin-top: 93px;
}
}
.border-radius {
border-radius: 6px;
}
</style>


@ -53,9 +53,13 @@
width="100%"
height="48px"
:on-press="onContinueInBrowserClick"
:is-disabled="isLoading"
:is-disabled="isLoading || !isAccessGrantsWebWorkerReady"
/>
<p v-if="!isOnboardingTour" class="permissions__cli-link" @click.stop="onContinueInCLIClick">
<p
class="permissions__cli-link"
:class="{ disabled: !isAccessGrantsWebWorkerReady || isLoading }"
@click.stop="onContinueInCLIClick"
>
Continue in CLI
</p>
</div>
@ -72,6 +76,7 @@ import VButton from '@/components/common/VButton.vue';
import BackIcon from '@/../static/images/accessGrants/back.svg';
import { RouteConfig } from '@/router';
import { ACCESS_GRANTS_ACTIONS } from '@/store/modules/accessGrants';
import { BUCKET_ACTIONS } from '@/store/modules/buckets';
@Component({
@ -102,25 +107,13 @@ export default class PermissionsStep extends Vue {
public async mounted(): Promise<void> {
if (!this.$route.params.key) {
this.onBackClick();
return;
}
this.key = this.$route.params.key;
this.worker = await new Worker('/static/static/wasm/webWorker.js');
this.worker.onmessage = (event: MessageEvent) => {
const data = event.data;
if (data.error) {
this.$notify.error(data.error);
return;
}
this.restrictedKey = data.value;
this.$notify.success('Permissions were set successfully');
};
this.worker.onerror = (error: ErrorEvent) => {
this.$notify.error(error.message);
};
this.setWorker();
try {
await this.$store.dispatch(BUCKET_ACTIONS.FETCH_ALL_BUCKET_NAMES);
@ -136,30 +129,37 @@ export default class PermissionsStep extends Vue {
* Redirects to previous step.
*/
public onBackClick(): void {
const PREVIOUS_ROUTE_NUMBER: number = -1;
this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
}
this.$router.go(PREVIOUS_ROUTE_NUMBER);
/**
* Sets local worker with worker instantiated in store.
* Also sets worker's onmessage and onerror logic.
*/
public setWorker(): void {
this.worker = this.$store.state.accessGrantsModule.accessGrantsWebWorker;
this.worker.onmessage = (event: MessageEvent) => {
const data = event.data;
if (data.error) {
this.$notify.error(data.error);
return;
}
this.restrictedKey = data.value;
this.$notify.success('Permissions were set successfully');
};
this.worker.onerror = (error: ErrorEvent) => {
this.$notify.error(error.message);
};
}
/**
* Holds on continue in CLI button click logic.
*/
public onContinueInCLIClick(): void {
if (this.isLoading) return;
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.CLIStep)).name,
params: {
key: this.key,
},
});
}
/**
* Holds on continue in browser button click logic.
*/
public onContinueInBrowserClick(): void {
if (this.isLoading) return;
if (this.isLoading || !this.isAccessGrantsWebWorkerReady) return;
this.isLoading = true;
@ -177,13 +177,62 @@ export default class PermissionsStep extends Vue {
// Give time for web worker to return value.
setTimeout(() => {
this.$store.dispatch(ACCESS_GRANTS_ACTIONS.CLEAR_SELECTION);
this.isLoading = false;
if (this.isOnboardingTour) {
this.$router.push({
name: RouteConfig.OnboardingTour.with(RouteConfig.AccessGrant.with(RouteConfig.AccessGrantCLI)).name,
params: {
key: this.key,
restrictedKey: this.restrictedKey,
},
});
return;
}
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.CLIStep)).name,
params: {
key: this.key,
restrictedKey: this.restrictedKey,
},
});
}, 1000);
}
/**
* Holds on continue in browser button click logic.
*/
public onContinueInBrowserClick(): void {
if (this.isLoading || !this.isAccessGrantsWebWorkerReady) return;
this.isLoading = true;
this.worker.postMessage({
'type': 'SetPermission',
'isDownload': this.isDownload,
'isUpload': this.isUpload,
'isList': this.isList,
'isDelete': this.isDelete,
'buckets': this.selectedBucketNames,
'apiKey': this.key,
'notBefore': this.notBeforePermission,
'notAfter': this.notAfterPermission,
});
// Give time for web worker to return value.
setTimeout(() => {
this.$store.dispatch(ACCESS_GRANTS_ACTIONS.CLEAR_SELECTION);
this.isLoading = false;
if (this.isOnboardingTour) {
this.$router.push({
name: RouteConfig.OnboardingTour.with(RouteConfig.AccessGrant.with(RouteConfig.AccessGrantPassphrase)).name,
params: {
key: this.restrictedKey,
key: this.key,
restrictedKey: this.restrictedKey,
},
});
@ -194,7 +243,8 @@ export default class PermissionsStep extends Vue {
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.EnterPassphraseStep)).name,
params: {
key: this.restrictedKey,
key: this.key,
restrictedKey: this.restrictedKey,
},
});
@ -204,12 +254,20 @@ export default class PermissionsStep extends Vue {
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.CreatePassphraseStep)).name,
params: {
key: this.restrictedKey,
key: this.key,
restrictedKey: this.restrictedKey,
},
});
}, 1000);
}
/**
* Indicates if the access grants web worker is ready to use.
*/
public get isAccessGrantsWebWorkerReady(): boolean {
return this.$store.state.accessGrantsModule.isAccessGrantsWebWorkerReady;
}
/**
* Indicates if current route is onboarding tour.
*/
@ -381,4 +439,9 @@ export default class PermissionsStep extends Vue {
.border-radius {
border-radius: 6px;
}
.disabled {
pointer-events: none;
color: rgba(0, 0, 0, 0.4);
}
</style>
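Reading the postMessage calls in PermissionsStep together with the two passphrase steps, the worker protocol implied by this diff can be summarized as below. This is a descriptive sketch of the observed message shapes, not a type definition from the codebase:
// message sent by PermissionsStep (field names copied from the call above)
interface SetPermissionMessage {
    type: 'SetPermission';
    isDownload: boolean;
    isUpload: boolean;
    isList: boolean;
    isDelete: boolean;
    buckets: string[];
    apiKey: string;   // the original key; the reply carries the restricted key
    notBefore: Date;  // Date types assumed from the store's permission fields
    notAfter: Date;
}

// message sent by CreatePassphraseStep / EnterPassphraseStep
interface GenerateAccessMessage {
    type: 'GenerateAccess';
    apiKey: string;   // the restricted key produced by SetPermission
    passphrase: string;
    projectID: string;
    satelliteName: string;
}

// replies observed in the onmessage handlers
type WorkerReply = { value: string } | { error: string };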


@ -123,6 +123,8 @@ import { MetaUtils } from '@/utils/meta';
},
})
export default class ResultStep extends Vue {
private key: string = '';
public access: string = '';
public isGatewayDropdownVisible: boolean = false;
public areGatewayCredentialsVisible: boolean = false;
@ -134,11 +136,14 @@ export default class ResultStep extends Vue {
* Sets local access from props value.
*/
public mounted(): void {
if (!this.$route.params.access || !this.$route.params.key) {
if (!this.$route.params.access && !this.$route.params.key) {
this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
return;
}
this.access = this.$route.params.access;
this.key = this.$route.params.key;
const requestURL = MetaUtils.getMetaContent('gateway-credentials-request-url');
if (requestURL) this.isGatewayDropdownVisible = true;
@ -196,7 +201,7 @@ export default class ResultStep extends Vue {
this.$router.push({
name: RouteConfig.OnboardingTour.with(RouteConfig.AccessGrant.with(RouteConfig.AccessGrantPassphrase)).name,
params: {
key: this.$route.params.key,
key: this.key,
},
});
@ -207,7 +212,7 @@ export default class ResultStep extends Vue {
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.EnterPassphraseStep)).name,
params: {
key: this.$route.params.key,
key: this.key,
},
});
@ -217,7 +222,7 @@ export default class ResultStep extends Vue {
this.$router.push({
name: RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.CreatePassphraseStep)).name,
params: {
key: this.$route.params.key,
key: this.key,
},
});
}


@ -102,6 +102,8 @@ export default class UploadStep extends Vue {
public mounted(): void {
if (!this.$route.params.isUplinkSectionEnabled) {
this.$router.push(RouteConfig.AccessGrants.with(RouteConfig.CreateAccessGrant.with(RouteConfig.NameStep)).path);
return;
}
this.isUplinkSectionEnabled = this.$route.params.isUplinkSectionEnabled === 'true';


@ -13,9 +13,10 @@
'permissions-margin': isPermissionsStep,
'passphrase-margin': isPassphraseStep,
'result-margin': isResultStep,
'cli-margin': isCLIStep,
}"
>
<ProgressBar/>
<ProgressBar v-if="!isCLIStep"/>
<router-view/>
</div>
</div>
@ -48,6 +49,13 @@ export default class CreateAccessGrantStep extends Vue {
return this.$route.name === RouteConfig.AccessGrantPassphrase.name;
}
/**
* Indicates if current route is access grant CLI step.
*/
public get isCLIStep(): boolean {
return this.$route.name === RouteConfig.AccessGrantCLI.name;
}
/**
* Indicates if current route is access grant result step.
*/
@ -81,7 +89,8 @@ export default class CreateAccessGrantStep extends Vue {
&__content {
display: flex;
align-items: center;
margin-left: -145px;
justify-content: center;
margin-left: -195px;
}
}
@ -96,4 +105,8 @@ export default class CreateAccessGrantStep extends Vue {
.result-margin {
margin-left: -175px;
}
.cli-margin {
margin-left: 0;
}
</style>


@ -77,6 +77,7 @@ export abstract class RouteConfig {
public static AccessGrant = new NavigationLink('access', 'Onboarding Access Grant');
public static AccessGrantName = new NavigationLink('name', 'Onboarding Name Access Grant');
public static AccessGrantPermissions = new NavigationLink('permissions', 'Onboarding Access Grant Permissions');
public static AccessGrantCLI = new NavigationLink('cli', 'Onboarding Access Grant CLI');
public static AccessGrantPassphrase = new NavigationLink('create-passphrase', 'Onboarding Access Grant Create Passphrase');
public static AccessGrantResult = new NavigationLink('result', 'Onboarding Access Grant Result');
@ -208,15 +209,23 @@ export const router = new Router({
component: PermissionsStep,
props: true,
},
{
path: RouteConfig.AccessGrantCLI.path,
name: RouteConfig.AccessGrantCLI.name,
component: CLIStep,
props: true,
},
{
path: RouteConfig.AccessGrantPassphrase.path,
name: RouteConfig.AccessGrantPassphrase.name,
component: CreatePassphraseStep,
props: true,
},
{
path: RouteConfig.AccessGrantResult.path,
name: RouteConfig.AccessGrantResult.name,
component: ResultStep,
props: true,
},
],
},
@ -257,21 +266,25 @@ export const router = new Router({
path: RouteConfig.CreatePassphraseStep.path,
name: RouteConfig.CreatePassphraseStep.name,
component: CreatePassphraseStep,
props: true,
},
{
path: RouteConfig.EnterPassphraseStep.path,
name: RouteConfig.EnterPassphraseStep.name,
component: EnterPassphraseStep,
props: true,
},
{
path: RouteConfig.ResultStep.path,
name: RouteConfig.ResultStep.name,
component: ResultStep,
props: true,
},
{
path: RouteConfig.CLIStep.path,
name: RouteConfig.CLIStep.name,
component: CLIStep,
props: true,
},
{
path: RouteConfig.UploadStep.path,

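Every route added above sets props: true, which makes vue-router forward route params to the component as props instead of forcing this.$route.params lookups. A minimal sketch, assuming vue-property-decorator is available; the component and param names are illustrative:

import Vue from 'vue';
import Router from 'vue-router';
import Component from 'vue-class-component';
import { Prop } from 'vue-property-decorator';

@Component
class CLIStepSketch extends Vue {
    // With props: true, a 'cliKey' param passed via router.push arrives as this prop.
    @Prop({ default: '' })
    public readonly cliKey!: string;
}

const sketchRouter = new Router({
    routes: [
        { path: 'cli', name: 'CLI Step', component: CLIStepSketch, props: true },
    ],
});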
View File

@ -19,6 +19,7 @@ export const ACCESS_GRANTS_ACTIONS = {
DELETE: 'deleteAccessGrants',
CLEAR: 'clearAccessGrants',
GET_GATEWAY_CREDENTIALS: 'getGatewayCredentials',
SET_ACCESS_GRANTS_WEB_WORKER: 'setAccessGrantsWebWorker',
SET_SEARCH_QUERY: 'setAccessGrantsSearchQuery',
SET_SORT_BY: 'setAccessGrantsSortingBy',
SET_SORT_DIRECTION: 'setAccessGrantsSortingDirection',
@ -31,6 +32,7 @@ export const ACCESS_GRANTS_ACTIONS = {
export const ACCESS_GRANTS_MUTATIONS = {
SET_PAGE: 'setAccessGrants',
SET_GATEWAY_CREDENTIALS: 'setGatewayCredentials',
SET_ACCESS_GRANTS_WEB_WORKER: 'setAccessGrantsWebWorker',
TOGGLE_SELECTION: 'toggleAccessGrantsSelection',
TOGGLE_BUCKET_SELECTION: 'toggleBucketSelection',
CLEAR_SELECTION: 'clearAccessGrantsSelection',
@ -54,6 +56,7 @@ const {
SET_PAGE_NUMBER,
SET_DURATION_PERMISSION,
SET_GATEWAY_CREDENTIALS,
SET_ACCESS_GRANTS_WEB_WORKER,
} = ACCESS_GRANTS_MUTATIONS;
export class AccessGrantsState {
@ -64,6 +67,8 @@ export class AccessGrantsState {
public permissionNotBefore: Date = new Date();
public permissionNotAfter: Date = new Date('2200-01-01');
public gatewayCredentials: GatewayCredentials = new GatewayCredentials();
public accessGrantsWebWorker: Worker | null = null;
public isAccessGrantsWebWorkerReady: boolean = false;
}
/**
@ -75,6 +80,22 @@ export function makeAccessGrantsModule(api: AccessGrantsApi): StoreModule<Access
return {
state: new AccessGrantsState(),
mutations: {
[SET_ACCESS_GRANTS_WEB_WORKER](state: AccessGrantsState): void {
state.accessGrantsWebWorker = new Worker('@/../static/wasm/accessGrant.worker.js', { type: 'module' });
state.accessGrantsWebWorker.onmessage = (event: MessageEvent) => {
const data = event.data;
if (data !== 'configured') {
console.error('Failed to configure access grants web worker');
return;
}
state.isAccessGrantsWebWorkerReady = true;
};
state.accessGrantsWebWorker.onerror = (error: ErrorEvent) => {
console.error(`Failed to configure access grants web worker. ${error.message}`);
};
},
[SET_PAGE](state: AccessGrantsState, page: AccessGrantsPage) {
state.page = page;
state.page.accessGrants = state.page.accessGrants.map(accessGrant => {
@ -155,6 +176,9 @@ export function makeAccessGrantsModule(api: AccessGrantsApi): StoreModule<Access
},
},
actions: {
setAccessGrantsWebWorker: function({commit}: any): void {
commit(SET_ACCESS_GRANTS_WEB_WORKER);
},
fetchAccessGrants: async function ({commit, rootGetters, state}, pageNumber: number): Promise<AccessGrantsPage> {
const projectId = rootGetters.selectedProject.id;
commit(SET_PAGE_NUMBER, pageNumber);

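With the mutation and action above, a component can create the worker once and wait for the 'configured' handshake before handing it work. A hedged sketch: the module name 'accessGrantsModule' and the polling helper are assumptions, not shown in this diff:

// Create the worker once, e.g. from a top-level component.
this.$store.dispatch(ACCESS_GRANTS_ACTIONS.SET_ACCESS_GRANTS_WEB_WORKER);

// Poll the readiness flag before posting messages to the worker.
async function whenWorkerReady(store: any): Promise<Worker> {
    while (!store.state.accessGrantsModule.isAccessGrantsWebWorkerReady) {
        await new Promise(resolve => setTimeout(resolve, 100));
    }
    return store.state.accessGrantsModule.accessGrantsWebWorker;
}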
View File

@ -57,7 +57,6 @@ import { PAYMENTS_ACTIONS } from '@/store/modules/payments';
import { PROJECTS_ACTIONS } from '@/store/modules/projects';
import { USER_ACTIONS } from '@/store/modules/users';
import { Project } from '@/types/projects';
import { User } from '@/types/users';
import { Size } from '@/utils/bytesSize';
import {
APP_STATE_ACTIONS,
@ -93,16 +92,26 @@ export default class DashboardArea extends Vue {
*/
public readonly projectDashboardPath: string = RouteConfig.ProjectDashboard.path;
/**
* Lifecycle hook before initial render.
* Sets access grants web worker.
*/
public beforeMount(): void {
try {
this.$store.dispatch(ACCESS_GRANTS_ACTIONS.SET_ACCESS_GRANTS_WEB_WORKER);
} catch (error) {
this.$notify.error(`Unable to set access grants web worker. ${error.message}`);
}
}
/**
* Lifecycle hook after initial render.
* Prefetches user's and project information.
*/
public async mounted(): Promise<void> {
let user: User;
// TODO: combine all project related requests in one
try {
user = await this.$store.dispatch(USER_ACTIONS.GET);
await this.$store.dispatch(USER_ACTIONS.GET);
} catch (error) {
if (!(error instanceof ErrorUnauthorized)) {
await this.$store.dispatch(APP_STATE_ACTIONS.CHANGE_STATE, AppState.ERROR);

View File

@ -15,7 +15,10 @@ const instantiateStreaming = WebAssembly.instantiateStreaming || async function
return await WebAssembly.instantiate(source, importObject);
};
const response = fetch('/static/static/wasm/access.wasm');
instantiateStreaming(response, go.importObject).then(result => go.run(result.instance)).catch(err => self.postMessage(new Error(err.message)));
instantiateStreaming(response, go.importObject).then(result => {
go.run(result.instance);
self.postMessage('configured');
}).catch(err => self.postMessage(new Error(err.message)));
self.onmessage = function (event) {
const data = event.data;

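On the main thread, the 'configured' message above is the signal that the wasm module finished instantiating. The worker's request schema is not shown in this hunk, so the message shape below is purely hypothetical:

const worker = new Worker('/static/wasm/accessGrant.worker.js');

worker.onmessage = (event: MessageEvent) => {
    if (event.data === 'configured') {
        // Safe to send work now: go.run has started and the Go callbacks are registered.
        worker.postMessage({ type: 'generate' }); // hypothetical request shape
        return;
    }
    if (event.data instanceof Error) {
        console.error(event.data.message);
    }
};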
View File

@ -4,7 +4,8 @@
const path = require('path');
const CompressionWebpackPlugin = require('compression-webpack-plugin');
const StyleLintPlugin = require('stylelint-webpack-plugin');
const productionGzipExtensions = ['js', 'css', 'ttf'];
const WorkerPlugin = require('worker-plugin');
const productionBrotliExtensions = ['js', 'css', 'ttf'];
module.exports = {
publicPath: "/static/dist",
@ -13,14 +14,18 @@ module.exports = {
configureWebpack: {
plugins: [
new CompressionWebpackPlugin({
algorithm: 'gzip',
test: new RegExp('\\.(' + productionGzipExtensions.join('|') + ')$'),
algorithm: 'brotliCompress',
filename: '[path][name].br',
test: new RegExp('\\.(' + productionBrotliExtensions.join('|') + ')$'),
threshold: 1024,
minRatio: 0.8
}),
new StyleLintPlugin({
files: ['**/*.{vue,sss,less,scss,sass}'],
emitWarning: true,
}),
new WorkerPlugin({
globalObject: 'self',
})
],
},
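The switch from gzip to brotliCompress above emits pre-compressed .br twins next to each asset, but they only help if the server rewrites requests when the client sends Accept-Encoding: br (the Dockerfile now ships access.wasm.br and wasm_exec.js.br for exactly this). The satellite serves these from Go, so the Express sketch below is illustrative only:

import express from 'express';
import { existsSync } from 'fs';
import { join } from 'path';

const app = express();
const root = '/app/static';

// Rewrite to the pre-compressed twin when the client supports Brotli.
app.use((req, res, next) => {
    const accepts = String(req.headers['accept-encoding'] || '');
    if (accepts.includes('br') && existsSync(join(root, req.path + '.br'))) {
        res.set('Content-Encoding', 'br');
        // Content-Type must describe the decoded payload, not the .br wrapper;
        // WebAssembly.instantiateStreaming requires application/wasm.
        if (req.path.endsWith('.wasm')) res.type('application/wasm');
        if (req.path.endsWith('.js')) res.type('application/javascript');
        req.url += '.br';
    }
    next();
});
app.use(express.static(root));
app.listen(8080);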