Merge 'master' branch

Change-Id: Iba69ea73ca4d3f1cd4ae94243eaaae033c5324e8

commit 9a8959d429

Makefile: 24 lines changed
@ -136,7 +136,7 @@ storagenode-console:
-w /go/src/storj.io/storj/web/storagenode \
-e HOME=/tmp \
-u $(shell id -u):$(shell id -g) \
node:10.15.1 \
node:14.15.3 \
/bin/bash -c "npm ci && npm run build"
# embed web assets into go
go-bindata -prefix web/storagenode/ -fs -o storagenode/console/consoleassets/bindata.resource.go -pkg consoleassets web/storagenode/dist/... web/storagenode/static/...
@ -163,8 +163,8 @@ satellite-image: satellite_linux_arm satellite_linux_arm64 satellite_linux_amd64
${DOCKER_BUILD} --pull=true -t storjlabs/satellite:${TAG}${CUSTOMTAG}-arm32v6 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm32v6 \
-f cmd/satellite/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/satellite:${TAG}${CUSTOMTAG}-aarch64 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=aarch64 \
${DOCKER_BUILD} --pull=true -t storjlabs/satellite:${TAG}${CUSTOMTAG}-arm64v8 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm64v8 \
-f cmd/satellite/Dockerfile .

.PHONY: storagenode-image
@ -174,8 +174,8 @@ storagenode-image: storagenode_linux_arm storagenode_linux_arm64 storagenode_lin
${DOCKER_BUILD} --pull=true -t storjlabs/storagenode:${TAG}${CUSTOMTAG}-arm32v6 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm32v6 \
-f cmd/storagenode/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/storagenode:${TAG}${CUSTOMTAG}-aarch64 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=aarch64 \
${DOCKER_BUILD} --pull=true -t storjlabs/storagenode:${TAG}${CUSTOMTAG}-arm64v8 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm64v8 \
-f cmd/storagenode/Dockerfile .
.PHONY: uplink-image
uplink-image: uplink_linux_arm uplink_linux_arm64 uplink_linux_amd64 ## Build uplink Docker image
@ -184,8 +184,8 @@ uplink-image: uplink_linux_arm uplink_linux_arm64 uplink_linux_amd64 ## Build up
${DOCKER_BUILD} --pull=true -t storjlabs/uplink:${TAG}${CUSTOMTAG}-arm32v6 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm32v6 \
-f cmd/uplink/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/uplink:${TAG}${CUSTOMTAG}-aarch64 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=aarch64 \
${DOCKER_BUILD} --pull=true -t storjlabs/uplink:${TAG}${CUSTOMTAG}-arm64v8 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm64v8 \
-f cmd/uplink/Dockerfile .
.PHONY: versioncontrol-image
versioncontrol-image: versioncontrol_linux_arm versioncontrol_linux_arm64 versioncontrol_linux_amd64 ## Build versioncontrol Docker image
@ -194,8 +194,8 @@ versioncontrol-image: versioncontrol_linux_arm versioncontrol_linux_arm64 versio
${DOCKER_BUILD} --pull=true -t storjlabs/versioncontrol:${TAG}${CUSTOMTAG}-arm32v6 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm32v6 \
-f cmd/versioncontrol/Dockerfile .
${DOCKER_BUILD} --pull=true -t storjlabs/versioncontrol:${TAG}${CUSTOMTAG}-aarch64 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=aarch64 \
${DOCKER_BUILD} --pull=true -t storjlabs/versioncontrol:${TAG}${CUSTOMTAG}-arm64v8 \
--build-arg=GOARCH=arm --build-arg=DOCKER_ARCH=arm64v8 \
-f cmd/versioncontrol/Dockerfile .

.PHONY: binary
@ -291,15 +291,15 @@ push-images: ## Push Docker images to Docker Hub (jenkins)
for c in satellite storagenode uplink versioncontrol ; do \
docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-amd64 \
&& docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-arm32v6 \
&& docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-aarch64 \
&& docker push storjlabs/$$c:${TAG}${CUSTOMTAG}-arm64v8 \
&& for t in ${TAG}${CUSTOMTAG} ${LATEST_TAG}; do \
docker manifest create storjlabs/$$c:$$t \
storjlabs/$$c:${TAG}${CUSTOMTAG}-amd64 \
storjlabs/$$c:${TAG}${CUSTOMTAG}-arm32v6 \
storjlabs/$$c:${TAG}${CUSTOMTAG}-aarch64 \
storjlabs/$$c:${TAG}${CUSTOMTAG}-arm64v8 \
&& docker manifest annotate storjlabs/$$c:$$t storjlabs/$$c:${TAG}${CUSTOMTAG}-amd64 --os linux --arch amd64 \
&& docker manifest annotate storjlabs/$$c:$$t storjlabs/$$c:${TAG}${CUSTOMTAG}-arm32v6 --os linux --arch arm --variant v6 \
&& docker manifest annotate storjlabs/$$c:$$t storjlabs/$$c:${TAG}${CUSTOMTAG}-aarch64 --os linux --arch arm64 \
&& docker manifest annotate storjlabs/$$c:$$t storjlabs/$$c:${TAG}${CUSTOMTAG}-arm64v8 --os linux --arch arm64 --variant v8 \
&& docker manifest push --purge storjlabs/$$c:$$t \
; done \
; done
@ -1,6 +1,6 @@
ARG DOCKER_ARCH
# Satellite UI static asset generation
FROM node:10.15.1 as ui
FROM node:14.15.3 as ui
WORKDIR /app
COPY web/satellite/ /app
COPY web/marketing/ /app/marketing
@ -1,8 +1,9 @@
ARG DOCKER_ARCH

# Fetch ca-certificates file for arch independent builds below
FROM alpine as ca-cert
RUN apk -U add ca-certificates

ARG DOCKER_ARCH
FROM ${DOCKER_ARCH:-amd64}/alpine
ARG TAG
ARG GOARCH
@ -355,7 +355,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
apiProcess.Arguments["setup"] = append(apiProcess.Arguments["setup"],
"--database", masterDBURL,
"--metainfo.database-url", metainfoDBURL,
"--orders.include-encrypted-metadata=true",
"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
)
}
@ -394,7 +393,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
coreProcess.Arguments = withCommon(apiProcess.Directory, Arguments{
"run": {
"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)),
"--orders.include-encrypted-metadata=true",
"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
},
})
@ -423,7 +421,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
"run": {
"repair",
"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugRepairerHTTP)),
"--orders.include-encrypted-metadata=true",
"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
},
})
@ -1,8 +1,9 @@
ARG DOCKER_ARCH

# Fetch ca-certificates file for arch independent builds below
FROM alpine as ca-cert
RUN apk -U add ca-certificates

ARG DOCKER_ARCH
FROM ${DOCKER_ARCH:-amd64}/alpine
ARG TAG
ARG GOARCH
docs/blueprints/sparse-order-storage-rollout.md (new file, 74 lines)
@ -0,0 +1,74 @@
# Sparse Order Storage Rollout

## Problem Statement

We need to fix all of the underspend/double-spend issues described in:

- https://storjlabs.atlassian.net/browse/SM-1187
  - Nodes can submit with the new endpoint and then the old endpoint to double spend
- https://storjlabs.atlassian.net/browse/SM-1242
  - Orders may be placed in the queue, then submitted with the new endpoint, then processed, leading to a double spend
- https://storjlabs.atlassian.net/browse/SM-1241
  - Nodes may accidentally underspend by submitting some orders in a window with the old endpoint, upgrading, then being unable to submit the remainder of the window with the new endpoint

## Current Design

We have two endpoints: an old one and a new one. Their behaviors are:

### Old
- Check the orders for validity, removing invalid ones
- Place all valid orders into the queue
- A worker eventually consumes the queue and updates any existing rollup rows

Note that orders submitted through the old endpoint may be submitted multiple times without double spends, because the queue worker handles the duplicates.

### New
- Check the orders for validity, removing invalid ones
- Ensure all orders are in the same window
- Check whether rollup rows already exist for that window
  - If the rows exist:
    - Return accepted if the submitted orders match
    - Return rejected if the submitted orders don't match
  - Otherwise, create rows with the sum of the submitted orders

Additionally, once a node uses the new endpoint, any attempt from that node to use the old endpoint will be rejected.

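To make the accept/reject rule concrete, here is a minimal sketch of the new endpoint's idempotency check. The `settleWindow` function and the per-action amount maps are hypothetical simplifications of the real rollup tables:

```go
package orders

// settleStatus is a hypothetical result type for a window submission.
type settleStatus int

const (
	statusAccepted settleStatus = iota
	statusRejected
)

// settleWindow sketches the idempotency rule: the first submission for a
// window creates the rollup rows; any later submission is accepted only if
// it exactly matches what was already recorded.
func settleWindow(existing map[int32]int64, haveRows bool, submitted map[int32]int64) (settleStatus, map[int32]int64) {
	if !haveRows {
		// first submission: create rows from the sum of the submitted orders
		return statusAccepted, submitted
	}
	if len(existing) != len(submitted) {
		return statusRejected, existing
	}
	for action, amount := range submitted {
		if existing[action] != amount {
			return statusRejected, existing
		}
	}
	return statusAccepted, existing
}
```
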
## Proposed Design

The rollout will happen in three phases. In phase 1 the endpoints will:

### Old
- Remain unchanged from the current design

### New
- Check the orders for validity, removing invalid ones
- Ensure all orders are in the same window
- Place all valid orders into the queue
- Create rollup rows with a settled amount equal to 0 if they do not already exist
- A worker eventually consumes the queue and updates existing rollup rows (a sketch of this flow follows below)

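A minimal sketch of the phase 1 new-endpoint flow, with `orderQueue`, `rollupDB`, `Window`, and `SettledOrder` as hypothetical stand-ins for the existing queue and rollup storage:

```go
package orders

import "context"

// Window identifies one node's hourly settlement window (hypothetical shape).
type Window struct {
	NodeID string
	Hour   int64
}

// SettledOrder is a validated order ready to settle (hypothetical shape).
type SettledOrder struct {
	Action int32
	Amount int64
}

type orderQueue interface {
	Enqueue(ctx context.Context, window Window, orders []SettledOrder) error
}

type rollupDB interface {
	// CreateZeroRows inserts rollup rows for the window with settled = 0,
	// doing nothing if rows for the window already exist.
	CreateZeroRows(ctx context.Context, window Window) error
}

// settlePhase1 runs after validation: it pins the window and defers the
// accounting to the queue worker. Writing zero-settled rows now is what lets
// the phase 2/3 endpoint reject a second submission to this window later.
func settlePhase1(ctx context.Context, q orderQueue, db rollupDB, w Window, valid []SettledOrder) error {
	if err := db.CreateZeroRows(ctx, w); err != nil {
		return err
	}
	// the worker eventually folds these into the rollup rows
	return q.Enqueue(ctx, w, valid)
}
```
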
After all (or enough) of the nodes are using the new endpoint, we switch to phase 2, where the endpoints will:

### Old
- Be removed

### New
- Remain unchanged from phase 1

Once we have waited longer than 2 days (the order expiration time), we know that every valid order will be submitted with the new endpoint only. Then we move to phase 3, where the endpoints will:

### Old
- Remain removed

### New
- Go back to the current design

We then wait for the queue to be fully drained, and then remove the worker and the queue tables.

## Discussion

This fixes double spends because, when orders are submitted with the new endpoint in phase 1, we create rollup rows with a settled amount of 0. That means that when we move to phase 2, any attempt to submit to a window that was already submitted to is rejected.

This fixes underspends because, during phase 1, we allow the orders to be submitted multiple times and let the queue and worker handle the double submissions as they already do.

The middle phase 2 exists so that a node cannot go directly from the current design's old endpoint to the current design's new endpoint with valid orders. If that were allowed, we would have the same double spends and underspends that we are trying to avoid.
docs/blueprints/sparse-order-storage.md (new file, 119 lines)
@ -0,0 +1,119 @@
# Sparse Order Storage Take 2

This blueprint describes a way to enormously cut down on database traffic from
bandwidth measurement and tracking. It reduces used-serial storage from
terabytes to a single boolean per node per hour.

## Background

Storj needs to measure bandwidth usage in a secure, cryptographic, and
correctly incentivized way in order to pay storage node operators for the
bandwidth that the system uses from them.

Currently we use a system described in section 4.17 of the Storj V3 whitepaper,
though "bandwidth allocations" have since been renamed "Order Limits" and
"restricted bandwidth allocations" have been renamed "Orders."

We track Order Limit serial numbers in the database, created at the time a
segment begins upload, and then we use that database to prevent storage
nodes from submitting the same Order more than once.

This naive system of keeping track of serial numbers, used or unused, puts a
massive amount of load on our central database, to the point that it currently
accounts for 99% of all of the data in the Satellite DB. The plan below seeks
to trade terabytes of used-serial tracking data in Postgres for a bit per node
per hour (we have a couple thousand nodes total right now).

## Order Limit creation

The only reason we currently need to talk to the database when creating order
limits is to store which bucket the order limit belongs to. Otherwise, we sign
the order limit in a way that lets the Satellite validate later that it signed
it. So, instead, we should encrypt the bucket id into the order limit itself.

For future proofing, we should make a Satellite-eyes-only generic metadata
envelope (with support for future key/value pairs) in the order limit.

The Satellite should keep a set of shared symmetric keys; we always support
decrypting with any key we know of, but we only encrypt with the currently
approved, rotating-in keys. Keys should have an id so the envelope can identify
which key it was encrypted with.

We should use authenticated encryption no matter what, but authenticated
encryption could additionally let us avoid key ids (we would have to cycle
through the keys and try each until one worked).

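As a rough sketch of the envelope idea (the `envelopeKey` type, the 2-byte key-ID width, and the choice of ChaCha20-Poly1305 are assumptions for illustration; the real `EncryptionKeys` API in satellite/orders differs in detail):

```go
package orders

import (
	"crypto/rand"

	"golang.org/x/crypto/chacha20poly1305"
)

// envelopeKey is one of the Satellite's rotating shared symmetric keys.
type envelopeKey struct {
	ID  [2]byte  // identifies which key encrypted an envelope
	Raw [32]byte // chacha20-poly1305 key material
}

// seal produces keyID || nonce || ciphertext, so on decryption the Satellite
// can pick the right key by ID instead of trial-decrypting with every key.
func (k *envelopeKey) seal(metadata []byte) ([]byte, error) {
	aead, err := chacha20poly1305.New(k.Raw[:])
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, chacha20poly1305.NonceSize)
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	out := append(k.ID[:], nonce...)
	// Seal appends the authenticated ciphertext to out.
	return aead.Seal(out, nonce, metadata, nil), nil
}
```
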
## Order submission

The main problem we're trying to solve is double spends. We want to make sure
that a Satellite, an Uplink, and a Storage Node all agree on the amount of
bandwidth used. The way we do this, as described in whitepaper section 4.17,
is to have the Satellite and Uplink work together to sign what they intend to
use; the Storage Node is then bound to not submit more than that. We want the
Storage Node to submit the bandwidth it has used, but once it "claims"
bandwidth from an Uplink, we need to make sure it can't claim the same
bandwidth twice.

Originally, this section described a rather ingenious plan that involved
reusing tools from the efficient certificate revocation literature (sparse
Merkle trees), but then we realized that we don't need any of it. Here's the
new plan:

The Satellite will keep Order windows, one per hour. Each Order Limit
specifies the time it was issued, and the Satellite will keep a flag per node
per window representing whether or not Orders for that window were submitted.

Storage Nodes will keep track of Orders by creation hour. Storage Nodes
currently reject Orders that are older than a couple of hours. We will make
sure this cutoff is exactly an hour, so Storage Nodes will have the property
that once an hour has expired, no new orders for that hour will come in.

As pointed out by Pentium100 on the forum: we'll need to specifically make
sure we account for requests that merely start within the hour, and not
submit orders for a window until all of the requests that started within that
hour have finished.

Storage Nodes will then have 48 hours to submit their Orders, one hour batch
at a time. Submitting Orders for an hour is an all-or-nothing process. The
Storage Node will stream all of its Orders for that hour to the Satellite,
and the submission will result in one of three outcomes: the orders for that
window are accepted, the window was already submitted for, or an unexpected
error occurred and the storage node should try again.

The Satellite will keep track, per window per node, of whether or not it has
accepted orders for that hour. If the window has already had a submission, no
further submissions for that hour can be made.

When orders are submitted, the Satellite will update the bandwidth rollup
tables in the usual way.

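A minimal sketch of the per-node-per-hour flag, assuming hypothetical types. In production this flag would live in the satellite database; the in-memory map here only illustrates the all-or-nothing rule:

```go
package orders

import (
	"sync"
	"time"
)

// windowFor buckets an order limit by its creation hour; every order issued
// within the same hour settles into the same window.
func windowFor(orderCreation time.Time) time.Time {
	return orderCreation.UTC().Truncate(time.Hour)
}

type windowKey struct {
	nodeID string // hypothetical; the real key would be a storj.NodeID
	window time.Time
}

type windowTracker struct {
	mu        sync.Mutex
	submitted map[windowKey]bool
}

// trySubmit flips the flag exactly once. A second submission for the same
// node and hour reports "window already submitted".
func (t *windowTracker) trySubmit(nodeID string, window time.Time) (accepted bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	k := windowKey{nodeID, window}
	if t.submitted[k] {
		return false
	}
	t.submitted[k] = true
	return true
}
```
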
## Other options

See the previous version of this document at
https://review.dev.storj.io/c/storj/storj/+/1732/3 (patchset 3).

## Other concerns

### Can storage nodes submit window batches that make the Satellite consume large amounts of memory?

Depending on the storage node, the number of orders submitted within an hour
may be substantial. Care will need to be taken with the order streaming
protocol to make sure the Satellite can appropriately stage its own internal
updates so that it does not try to hold all of the orders in memory while
processing.

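One way to keep memory bounded is to fold each received order into per-action running totals instead of buffering the whole batch. A sketch, where `streamedOrder` and the `recv` callback are hypothetical stand-ins for the real stream (in the actual endpoint the action comes from the matching order limit, not the order itself):

```go
package orders

import (
	"errors"
	"io"
)

type streamedOrder struct {
	Action int32 // taken from the paired order limit in the real endpoint
	Amount int64
}

// consumeWindow aggregates a whole hour's submission in O(actions) memory:
// nothing is retained per order, only the running totals.
func consumeWindow(recv func() (*streamedOrder, error)) (map[int32]int64, error) {
	totals := make(map[int32]int64)
	for {
		order, err := recv()
		if errors.Is(err, io.EOF) {
			return totals, nil // stream finished cleanly
		}
		if err != nil {
			return nil, err
		}
		totals[order.Action] += order.Amount
	}
}
```
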
### Usage limits

We think usage limits are an orthogonal problem and should be solved with a
separate system to keep track of per-bandwidth-window allocations and usage.

## References

* https://review.dev.storj.io/c/storj/storj/+/1732/3
* https://www.links.org/files/RevocationTransparency.pdf
* https://ethresear.ch/t/optimizing-sparse-merkle-trees/3751
* https://eprint.iacr.org/2018/955.pdf
* https://www.cs.purdue.edu/homes/ninghui/papers/accumulator_acns07.pdf
* https://en.wikipedia.org/wiki/Benaloh_cryptosystem
* https://en.wikipedia.org/wiki/Paillier_cryptosystem#Electronic_voting
* https://en.wikipedia.org/wiki/Homomorphic_encryption#Partially_homomorphic_cryptosystems
* http://kodu.ut.ee/~lipmaa/papers/lip12b/cl-accum.pdf
* https://blog.goodaudience.com/deep-dive-on-rsa-accumulators-230bc84144d9
@ -43,7 +43,6 @@ import (
"storj.io/storj/satellite/console/consoleauth"
"storj.io/storj/satellite/console/consoleweb"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/dbcleanup"
"storj.io/storj/satellite/gc"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/inspector"
@ -137,10 +136,6 @@ type Satellite struct {
Chore *expireddeletion.Chore
}

DBCleanup struct {
Chore *dbcleanup.Chore
}

Accounting struct {
Tally *tally.Service
Rollup *rollup.Service
@ -508,7 +503,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
FlushInterval: defaultInterval,
NodeStatusLogging: true,
WindowEndpointRolloutPhase: orders.WindowEndpointRolloutPhase3,
IncludeEncryptedMetadata: true,
EncryptionKeys: *encryptionKeys,
},
Checker: checker.Config{
@ -562,10 +556,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
Interval: defaultInterval,
Enabled: true,
},
DBCleanup: dbcleanup.Config{
SerialsInterval: defaultInterval,
BatchSize: 1000,
},
Tally: tally.Config{
Interval: defaultInterval,
},
@ -744,8 +734,6 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer

system.ExpiredDeletion.Chore = peer.ExpiredDeletion.Chore

system.DBCleanup.Chore = peer.DBCleanup.Chore

system.Accounting.Tally = peer.Accounting.Tally
system.Accounting.Rollup = peer.Accounting.Rollup
system.Accounting.ProjectUsage = api.Accounting.ProjectUsage
@ -129,21 +129,33 @@ func newAddressedOrderLimit(ctx context.Context, action pb.PieceAction, satellit
serialNumber := testrand.SerialNumber()

now := time.Now()

limit := &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storageNode.ID(),
PieceId: pieceID,
Action: action,
Limit: dataSize.Int64(),
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
key := satellite.Config.Orders.EncryptionKeys.Default
encrypted, err := key.EncryptMetadata(
serialNumber,
&pb.OrderLimitMetadata{
ProjectBucketPrefix: []byte("testprojectid/testbucketname"),
},
)
if err != nil {
return nil, err
}

limit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
limit := &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storageNode.ID(),
PieceId: pieceID,
Action: action,
Limit: dataSize.Int64(),
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
EncryptedMetadataKeyId: key.ID[:],
EncryptedMetadata: encrypted,
}

limit, err = signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
if err != nil {
return nil, err
}
@ -31,7 +31,6 @@ import (
"storj.io/storj/satellite/accounting/tally"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/dbcleanup"
"storj.io/storj/satellite/gc"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/metainfo"
@ -110,10 +109,6 @@ type Core struct {
Chore *expireddeletion.Chore
}

DBCleanup struct {
Chore *dbcleanup.Chore
}

Accounting struct {
Tally *tally.Service
Rollup *rollup.Service
@ -394,17 +389,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
debug.Cycle("Expired Segments Chore", peer.ExpiredDeletion.Chore.Loop))
}

{ // setup db cleanup
peer.DBCleanup.Chore = dbcleanup.NewChore(peer.Log.Named("dbcleanup"), peer.DB.Orders(), config.DBCleanup)
peer.Services.Add(lifecycle.Item{
Name: "dbcleanup",
Run: peer.DBCleanup.Chore.Run,
Close: peer.DBCleanup.Chore.Close,
})
peer.Debug.Server.Panel.Add(
debug.Cycle("DB Cleanup Serials", peer.DBCleanup.Chore.Serials))
}

{ // setup accounting
peer.Accounting.Tally = tally.New(peer.Log.Named("accounting:tally"), peer.DB.StoragenodeAccounting(), peer.DB.ProjectAccounting(), peer.LiveAccounting.Cache, peer.Metainfo.Loop, config.Tally.Interval)
peer.Services.Add(lifecycle.Item{
@ -1,92 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package dbcleanup

import (
"context"
"time"

"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"go.uber.org/zap"

"storj.io/common/sync2"
"storj.io/storj/satellite/orders"
)

var (
// Error the default dbcleanup errs class.
Error = errs.Class("dbcleanup error")

mon = monkit.Package()
)

// Config defines configuration struct for dbcleanup chore.
type Config struct {
SerialsInterval time.Duration `help:"how often to delete expired serial numbers" default:"4h"`
BatchSize int `help:"number of records to delete per delete execution. 0 indicates no limit. applicable to cockroach DB only." default:"1000"`
}

// Chore for deleting DB entries that are no longer needed.
//
// architecture: Chore
type Chore struct {
log *zap.Logger
orders orders.DB

Serials *sync2.Cycle
config Config

serialsDeleteOptions orders.SerialDeleteOptions
}

// NewChore creates new chore for deleting DB entries.
func NewChore(log *zap.Logger, ordersDB orders.DB, config Config) *Chore {
return &Chore{
log: log,
orders: ordersDB,

Serials: sync2.NewCycle(config.SerialsInterval),
config: config,

serialsDeleteOptions: orders.SerialDeleteOptions{
BatchSize: config.BatchSize,
},
}
}

// Run starts the db cleanup chore.
func (chore *Chore) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
return chore.Serials.Run(ctx, chore.deleteExpiredSerials)
}

func (chore *Chore) deleteExpiredSerials(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
chore.log.Debug("deleting expired serial numbers")

now := time.Now()

deleted, err := chore.orders.DeleteExpiredSerials(ctx, now, chore.serialsDeleteOptions)
if err != nil {
chore.log.Error("deleting expired serial numbers", zap.Error(err))
} else {
chore.log.Debug("expired serials deleted", zap.Int("items deleted", deleted))
}

deleted, err = chore.orders.DeleteExpiredConsumedSerials(ctx, now)
if err != nil {
chore.log.Error("deleting expired serial numbers", zap.Error(err))
} else {
chore.log.Debug("expired serials deleted", zap.Int("items deleted", deleted))
}

return nil
}

// Close stops the dbcleanup chore.
func (chore *Chore) Close() error {
chore.Serials.Close()
return nil
}
@ -1,68 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package dbcleanup_test

import (
"testing"
"time"

"github.com/stretchr/testify/require"

"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
)

func TestDeleteExpiredSerials(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
node := planet.StorageNodes[0].ID()
satellite.DBCleanup.Chore.Serials.Pause()

var expiredSerials []storj.SerialNumber
for i := 0; i < 5; i++ {
expiredSerials = append(expiredSerials, storj.SerialNumber{byte(i)})
}

var freshSerials []storj.SerialNumber
for i := 5; i < 10; i++ {
freshSerials = append(freshSerials, storj.SerialNumber{byte(i)})
}

yesterday := time.Now().Add(-24 * time.Hour)
for _, serial := range expiredSerials {
err := satellite.DB.Orders().CreateSerialInfo(ctx, serial, []byte("bucket"), yesterday)
require.NoError(t, err)

_, err = satellite.DB.Orders().UseSerialNumber(ctx, serial, node)
require.NoError(t, err)
}

tomorrow := yesterday.Add(48 * time.Hour)
for _, serial := range freshSerials {
err := satellite.DB.Orders().CreateSerialInfo(ctx, serial, []byte("bucket"), tomorrow)
require.NoError(t, err)

_, err = satellite.DB.Orders().UseSerialNumber(ctx, serial, node)
require.NoError(t, err)
}

// trigger expired serial number deletion
satellite.DBCleanup.Chore.Serials.TriggerWait()

// check expired serial numbers have been deleted from serial_numbers and used_serials
for _, serial := range expiredSerials {
_, err := satellite.DB.Orders().UseSerialNumber(ctx, serial, node)
require.EqualError(t, err, "serial number: serial number not found")
}

// check fresh serial numbers have not been deleted
for _, serial := range freshSerials {
_, err := satellite.DB.Orders().UseSerialNumber(ctx, serial, node)
require.EqualError(t, err, "serial number: serial number already used")
}
})
}
@ -38,10 +38,6 @@ type DB interface {
UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) ([]byte, error)
// UnuseSerialNumber removes pair serial number -> storage node id from database
UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error
// DeleteExpiredSerials deletes all expired serials in serial_number, used_serials, and consumed_serials table.
DeleteExpiredSerials(ctx context.Context, now time.Time, options SerialDeleteOptions) (_ int, err error)
// DeleteExpiredConsumedSerials deletes all expired serials in the consumed_serials table.
DeleteExpiredConsumedSerials(ctx context.Context, now time.Time) (_ int, err error)
// GetBucketIDFromSerialNumber returns the bucket ID associated with the serial number
GetBucketIDFromSerialNumber(ctx context.Context, serialNumber storj.SerialNumber) ([]byte, error)
@ -647,52 +643,32 @@ func (endpoint *Endpoint) SettlementWithWindowFinal(stream pb.DRPCOrders_Settlem

storagenodeSettled[int32(orderLimit.Action)] += order.Amount

var bucketName string
var projectID uuid.UUID
if len(orderLimit.EncryptedMetadata) > 0 {
metadata, err := endpoint.ordersService.DecryptOrderMetadata(ctx, orderLimit)
if err != nil {
log.Info("decrypt order metadata err:", zap.Error(err))
mon.Event("bucketinfo_from_orders_metadata_error_1")
goto idFromSerialTable
}
bucketInfo, err := metabase.ParseBucketPrefix(
metabase.BucketPrefix(metadata.GetProjectBucketPrefix()),
)
if err != nil {
log.Info("decrypt order: ParseBucketPrefix", zap.Error(err))
mon.Event("bucketinfo_from_orders_metadata_error_2")
goto idFromSerialTable
}
bucketName = bucketInfo.BucketName
projectID = bucketInfo.ProjectID
mon.Event("bucketinfo_from_orders_metadata")
metadata, err := endpoint.ordersService.DecryptOrderMetadata(ctx, orderLimit)
if err != nil {
log.Debug("decrypt order metadata err:", zap.Error(err))
mon.Event("bucketinfo_from_orders_metadata_error_1")
continue
}

// If we cannot get the bucket name and project ID from the orderLimit metadata, then fallback
// to the old method of getting it from the serial_numbers table.
// This is only temporary to make sure the orderLimit metadata is working correctly.
idFromSerialTable:
if bucketName == "" || projectID.IsZero() {
bucketPrefix, err := endpoint.DB.GetBucketIDFromSerialNumber(ctx, serialNum)
if err != nil {
log.Info("get bucketPrefix from serial number table err", zap.Error(err))
continue
}

bucket, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(bucketPrefix))
if err != nil {
log.Info("split bucket err", zap.Error(err), zap.String("bucketPrefix", string(bucketPrefix)))
continue
}
bucketName = bucket.BucketName
projectID = bucket.ProjectID
mon.Event("bucketinfo_from_serial_number")
bucketInfo, err := metabase.ParseBucketPrefix(
metabase.BucketPrefix(metadata.GetProjectBucketPrefix()),
)
if err != nil {
log.Debug("decrypt order: ParseBucketPrefix", zap.Error(err))
mon.Event("bucketinfo_from_orders_metadata_error_2")
continue
}
if bucketInfo.BucketName == "" || bucketInfo.ProjectID.IsZero() {
log.Info("decrypt order: bucketName or projectID not set",
zap.String("bucketName", bucketInfo.BucketName),
zap.String("projectID", bucketInfo.ProjectID.String()),
)
mon.Event("bucketinfo_from_orders_metadata_error_3")
continue
}

bucketSettled[bucketIDAction{
bucketname: bucketName,
projectID: projectID,
bucketname: bucketInfo.BucketName,
projectID: bucketInfo.ProjectID,
action: orderLimit.Action,
}] += order.Amount
}
@ -43,7 +43,9 @@ func runTestWithPhases(t *testing.T, fn func(t *testing.T, ctx *testcontext.Cont
}

func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
runTestWithPhases(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
ordersDB := satellite.Orders.DB
storagenode := planet.StorageNodes[0]
@ -51,6 +53,7 @@
projectID := testrand.UUID()
bucketname := "testbucket"
bucketID := storj.JoinPaths(projectID.String(), bucketname)
key := satellite.Config.Orders.EncryptionKeys.Default

// stop any async flushes because we want to be sure when some values are
// written to avoid races
@ -79,11 +82,21 @@
func() {
// create serial number to use in test. must be unique for each run.
serialNumber1 := testrand.SerialNumber()
err = ordersDB.CreateSerialInfo(ctx, serialNumber1, []byte(bucketID), now.AddDate(1, 0, 10))
encrypted1, err := key.EncryptMetadata(
serialNumber1,
&pb.OrderLimitMetadata{
ProjectBucketPrefix: []byte(bucketID),
},
)
require.NoError(t, err)

serialNumber2 := testrand.SerialNumber()
err = ordersDB.CreateSerialInfo(ctx, serialNumber2, []byte(bucketID), now.AddDate(1, 0, 10))
encrypted2, err := key.EncryptMetadata(
serialNumber2,
&pb.OrderLimitMetadata{
ProjectBucketPrefix: []byte(bucketID),
},
)
require.NoError(t, err)

piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
@ -91,16 +104,18 @@

// create signed orderlimit or order to test with
limit1 := &pb.OrderLimit{
SerialNumber: serialNumber1,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: tt.orderCreation,
OrderExpiration: now.Add(24 * time.Hour),
SerialNumber: serialNumber1,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: tt.orderCreation,
OrderExpiration: now.Add(24 * time.Hour),
EncryptedMetadataKeyId: key.ID[:],
EncryptedMetadata: encrypted1,
}
orderLimit1, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit1)
require.NoError(t, err)
@ -112,16 +127,18 @@
require.NoError(t, err)

limit2 := &pb.OrderLimit{
SerialNumber: serialNumber2,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
SerialNumber: serialNumber2,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
EncryptedMetadataKeyId: key.ID[:],
EncryptedMetadata: encrypted2,
}
orderLimit2, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit2)
require.NoError(t, err)
@ -182,7 +199,10 @@
})
}
func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
runTestWithPhases(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

const dataAmount int64 = 50
satellite := planet.Satellites[0]
ordersDB := satellite.Orders.DB
@ -191,6 +211,7 @@
projectID := testrand.UUID()
bucketname := "testbucket"
bucketID := storj.JoinPaths(projectID.String(), bucketname)
key := satellite.Config.Orders.EncryptionKeys.Default

// stop any async flushes because we want to be sure when some values are
// written to avoid races
@ -208,7 +229,12 @@

// create serial number to use in test
serialNumber := testrand.SerialNumber()
err = ordersDB.CreateSerialInfo(ctx, serialNumber, []byte(bucketID), now.AddDate(1, 0, 10))
encrypted, err := key.EncryptMetadata(
serialNumber,
&pb.OrderLimitMetadata{
ProjectBucketPrefix: []byte(bucketID),
},
)
require.NoError(t, err)

piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
@ -228,16 +254,18 @@
func() {
// create signed orderlimit or order to test with
limit := &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
EncryptedMetadataKeyId: key.ID[:],
EncryptedMetadata: encrypted,
}
orderLimit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
require.NoError(t, err)
@ -335,18 +363,28 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {

_, piecePrivateKey2, err := storj.NewPieceKey()
require.NoError(t, err)
key := satellite.Config.Orders.EncryptionKeys.Default
encrypted, err := key.EncryptMetadata(
serialNumber1,
&pb.OrderLimitMetadata{
ProjectBucketPrefix: []byte(bucketID),
},
)
require.NoError(t, err)

limit := pb.OrderLimit{
SerialNumber: serialNumber1,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey1,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
SerialNumber: serialNumber1,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey1,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
EncryptedMetadataKeyId: key.ID[:],
EncryptedMetadata: encrypted,
}
orderLimit1, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), &limit)
require.NoError(t, err)
@ -453,19 +491,29 @@ func TestSettlementEndpointSingleOrder(t *testing.T) {

piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
require.NoError(t, err)
key := satellite.Config.Orders.EncryptionKeys.Default
encrypted, err := key.EncryptMetadata(
serialNumber,
&pb.OrderLimitMetadata{
ProjectBucketPrefix: []byte(bucketID),
},
)
require.NoError(t, err)

// create signed orderlimit or order to test with
limit := &pb.OrderLimit{
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
SerialNumber: serialNumber,
SatelliteId: satellite.ID(),
UplinkPublicKey: piecePublicKey,
StorageNodeId: storagenode.ID(),
PieceId: storj.NewPieceID(),
Action: pb.PieceAction_PUT,
Limit: 1000,
PieceExpiration: time.Time{},
OrderCreation: now,
OrderExpiration: now.Add(24 * time.Hour),
EncryptedMetadataKeyId: key.ID[:],
EncryptedMetadata: encrypted,
}
orderLimit, err := signing.SignOrderLimit(ctx, signing.SignerFromFullIdentity(satellite.Identity), limit)
require.NoError(t, err)
@ -1,82 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package orders_test

import (
"testing"
"time"

"github.com/stretchr/testify/require"

"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/storj/satellite"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

func TestSerialNumbers(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
ordersDB := db.Orders()

expectedBucket := []byte("bucketID")
err := ordersDB.CreateSerialInfo(ctx, storj.SerialNumber{1}, expectedBucket, time.Now())
require.NoError(t, err)

bucketID, err := ordersDB.UseSerialNumber(ctx, storj.SerialNumber{1}, storj.NodeID{1})
require.NoError(t, err)
require.Equal(t, expectedBucket, bucketID)

// try to use used serial number
_, err = ordersDB.UseSerialNumber(ctx, storj.SerialNumber{1}, storj.NodeID{1})
require.Error(t, err)
require.True(t, orders.ErrUsingSerialNumber.Has(err))

err = ordersDB.UnuseSerialNumber(ctx, storj.SerialNumber{1}, storj.NodeID{1})
require.NoError(t, err)

bucketID, err = ordersDB.UseSerialNumber(ctx, storj.SerialNumber{1}, storj.NodeID{1})
require.NoError(t, err)
require.Equal(t, expectedBucket, bucketID)

// not existing serial number
bucketID, err = ordersDB.UseSerialNumber(ctx, storj.SerialNumber{99}, storj.NodeID{1})
require.Error(t, err)
require.True(t, orders.ErrUsingSerialNumber.Has(err))
require.Empty(t, bucketID)

deleted, err := ordersDB.DeleteExpiredSerials(ctx, time.Now(), orders.SerialDeleteOptions{})
require.NoError(t, err)
require.Equal(t, deleted, 1)

// check serial number has been deleted from serial_numbers and used_serials
_, err = ordersDB.UseSerialNumber(ctx, storj.SerialNumber{1}, storj.NodeID{1})
require.EqualError(t, err, "serial number: serial number not found")
})
}

func TestDeleteExpiredWithOptions(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
ordersDB := db.Orders()

err := ordersDB.CreateSerialInfo(ctx, storj.SerialNumber{1}, []byte("bucketID1"), time.Now())
require.NoError(t, err)

err = ordersDB.CreateSerialInfo(ctx, storj.SerialNumber{2}, []byte("bucketID2"), time.Now())
require.NoError(t, err)

deleted, err := ordersDB.DeleteExpiredSerials(ctx, time.Now(), orders.SerialDeleteOptions{
BatchSize: 1,
})
require.NoError(t, err)
require.Equal(t, 2, deleted)

// check serial number has been deleted from serial_numbers and used_serials
_, err = ordersDB.UseSerialNumber(ctx, storj.SerialNumber{1}, storj.NodeID{1})
require.EqualError(t, err, "serial number: serial number not found")

_, err = ordersDB.UseSerialNumber(ctx, storj.SerialNumber{2}, storj.NodeID{1})
require.EqualError(t, err, "serial number: serial number not found")
})
}
@ -32,7 +32,6 @@ var (
// Config is a configuration struct for orders Service.
type Config struct {
EncryptionKeys EncryptionKeys `help:"encryption keys to encrypt info in orders" default:""`
IncludeEncryptedMetadata bool `help:"include encrypted metadata in the order limit" default:"false"`
Expiration time.Duration `help:"how long until an order expires" default:"48h"` // 2 days
SettlementBatchSize int `help:"how many orders to batch per transaction" default:"250"`
FlushBatchSize int `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"10000"`
@ -59,8 +58,7 @@ type Service struct {
orders DB
buckets BucketsDB

includeEncryptedMetadata bool
encryptionKeys EncryptionKeys
encryptionKeys EncryptionKeys

satelliteAddress *pb.NodeAddress
orderExpiration time.Duration
@ -76,7 +74,7 @@ func NewService(
config Config,
satelliteAddress *pb.NodeAddress,
) (*Service, error) {
if config.IncludeEncryptedMetadata && config.EncryptionKeys.Default.IsZero() {
if config.EncryptionKeys.Default.IsZero() {
return nil, Error.New("encryption keys must be specified to include encrypted metadata")
}

@ -87,8 +85,7 @@
orders: orders,
buckets: buckets,

includeEncryptedMetadata: config.IncludeEncryptedMetadata,
encryptionKeys: config.EncryptionKeys,
encryptionKeys: config.EncryptionKeys,

satelliteAddress: satelliteAddress,
orderExpiration: config.Expiration,
@ -103,11 +100,6 @@ func (service *Service) VerifyOrderLimitSignature(ctx context.Context, signed *p
return signing.VerifyOrderLimitSignature(ctx, service.satellite, signed)
}

func (service *Service) saveSerial(ctx context.Context, serialNumber storj.SerialNumber, bucket metabase.BucketLocation, expiresAt time.Time) (err error) {
defer mon.Task()(&ctx)(&err)
return service.orders.CreateSerialInfo(ctx, serialNumber, []byte(bucket.Prefix()), expiresAt)
}

func (service *Service) updateBandwidth(ctx context.Context, bucket metabase.BucketLocation, addressedOrderLimits ...*pb.AddressedOrderLimit) (err error) {
defer mon.Task()(&ctx)(&err)
if len(addressedOrderLimits) == 0 {
@ -195,11 +187,6 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucket metabas
return nil, storj.PiecePrivateKey{}, ErrDownloadFailedNotEnoughPieces.New("not enough orderlimits: got %d, required %d", len(signer.AddressedLimits), redundancy.RequiredCount())
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}

if err := service.updateBandwidth(ctx, bucket, signer.AddressedLimits...); err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@ -265,11 +252,6 @@ func (service *Service) CreateGetOrderLimits2(ctx context.Context, bucket metaba
return nil, storj.PiecePrivateKey{}, ErrDownloadFailedNotEnoughPieces.New("not enough orderlimits: got %d, required %d", len(signer.AddressedLimits), redundancy.RequiredCount())
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}

if err := service.updateBandwidth(ctx, bucket, signer.AddressedLimits...); err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@ -303,11 +285,6 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabas
}
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}

if err := service.updateBandwidth(ctx, bucket, signer.AddressedLimits...); err != nil {
return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@ -369,11 +346,6 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucket metab
err = Error.New("not enough nodes available: got %d, required %d", limitsCount, segment.Redundancy.RequiredShares)
return nil, storj.PiecePrivateKey{}, nil, errs.Combine(err, nodeErrors.Err())
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, nil, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, bucket, limits...); err != nil {
return nil, storj.PiecePrivateKey{}, nil, Error.Wrap(err)
}
@ -413,10 +385,6 @@ func (service *Service) CreateAuditOrderLimit(ctx context.Context, bucket metaba
return nil, storj.PiecePrivateKey{}, "", Error.Wrap(err)
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, "", Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, bucket, limit); err != nil {
return nil, storj.PiecePrivateKey{}, "", Error.Wrap(err)
}
@ -483,10 +451,6 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucket m
return nil, storj.PiecePrivateKey{}, errs.Combine(err, nodeErrors.Err())
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, bucket, limits...); err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@ -555,10 +519,6 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucket m
}
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, bucket, limits...); err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@ -597,10 +557,6 @@ func (service *Service) CreateGracefulExitPutOrderLimit(ctx context.Context, buc
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}

err = service.saveSerial(ctx, signer.Serial, bucket, signer.OrderExpiration)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
if err := service.updateBandwidth(ctx, bucket, limit); err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@ -60,12 +60,5 @@ func TestOrderLimitsEncryptedMetadata(t *testing.T) {
require.NoError(t, err)
require.Equal(t, bucketName, actualBucketInfo.BucketName)
require.Equal(t, projectID, actualBucketInfo.ProjectID)

bucketPrefix, err := satellitePeer.Orders.DB.GetBucketIDFromSerialNumber(ctx, orderLimit1.SerialNumber)
require.NoError(t, err)
bucket1, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(bucketPrefix))
require.NoError(t, err)
require.Equal(t, actualBucketInfo.BucketName, bucket1.BucketName)
require.Equal(t, actualBucketInfo.ProjectID, bucket1.ProjectID)
})
}
@ -140,7 +140,7 @@ func NewSignerGracefulExit(service *Service, rootPieceID storj.PieceID, orderCre
func (signer *Signer) Sign(ctx context.Context, node storj.NodeURL, pieceNum int32) (_ *pb.AddressedOrderLimit, err error) {
defer mon.Task()(&ctx)(&err)

if signer.Service.includeEncryptedMetadata && len(signer.EncryptedMetadata) == 0 {
if len(signer.EncryptedMetadata) == 0 {
encryptionKey := signer.Service.encryptionKeys.Default
if encryptionKey.IsZero() {
return nil, ErrSigner.New("default encryption key is missing")
@ -33,7 +33,6 @@ func TestSigner_EncryptedMetadata(t *testing.T) {
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
testplanet.ReconfigureRS(1, 1, 1, 1)(log, index, config)

config.Orders.IncludeEncryptedMetadata = true
config.Orders.EncryptionKeys = *ekeys
},
},
@ -91,7 +90,6 @@ func TestSigner_EncryptedMetadata_UploadDownload(t *testing.T) {
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
testplanet.ReconfigureRS(1, 1, 1, 1)(log, index, config)

config.Orders.IncludeEncryptedMetadata = true
config.Orders.EncryptionKeys = *ekeys
},
},
@ -26,7 +26,6 @@ import (
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/console/consoleweb"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/dbcleanup"
"storj.io/storj/satellite/gc"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/mailservice"
@ -129,8 +128,6 @@ type Config struct {

ExpiredDeletion expireddeletion.Config

DBCleanup dbcleanup.Config

Tally tally.Config
Rollup rollup.Config
LiveAccounting live.Config
@ -125,6 +125,11 @@ model node (
fields last_net
)

index (
name nodes_dis_unk_exit_fin_last_success_index
fields disqualified unknown_audit_suspended exit_finished_at last_contact_success
)

field id blob
// address is how to contact the node, this can be a hostname or IP and it contains the port
field address text ( updatable, default "" ) // TODO: use compressed format
@ -600,6 +605,11 @@ read scalar (
model bucket_storage_tally (
key bucket_name project_id interval_start

index (
name bucket_storage_tallies_project_id_index
fields project_id
)

field bucket_name blob
field project_id blob

@ -641,6 +651,11 @@ read all (
model storagenode_bandwidth_rollup (
key storagenode_id interval_start action

index (
name storagenode_bandwidth_rollups_interval_start_index
fields interval_start
)

field storagenode_id blob
field interval_start timestamp
field interval_seconds uint
@ -764,15 +764,18 @@ CREATE TABLE user_credits (
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
@ -1320,15 +1323,18 @@ CREATE TABLE user_credits (
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
@ -444,15 +444,18 @@ CREATE TABLE user_credits (
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
@ -444,15 +444,18 @@ CREATE TABLE user_credits (
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
@ -1131,6 +1131,33 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
				`ALTER TABLE injuredsegments DROP COLUMN num_healthy_pieces;`,
			},
		},
		{
			DB:          &db.migrationDB,
			Description: "create new index on storagenode_bandwidth_rollups to improve get nodes since query.",
			Version:     135,
			SeparateTx:  true,
			Action: migrate.SQL{
				`CREATE INDEX IF NOT EXISTS storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );`,
			},
		},
		{
			DB:          &db.migrationDB,
			Description: "create new index on bucket_storage_tallies to improve get tallies by project ID.",
			Version:     136,
			SeparateTx:  true,
			Action: migrate.SQL{
				`CREATE INDEX IF NOT EXISTS bucket_storage_tallies_project_id_index ON bucket_storage_tallies (project_id);`,
			},
		},
		{
			DB:          &db.migrationDB,
			Description: "create new index on nodes to improve queries using disqualified, unknown_audit_suspended, exit_finished_at, and last_contact_success.",
			Version:     137,
			SeparateTx:  true,
			Action: migrate.SQL{
				`CREATE INDEX IF NOT EXISTS nodes_dis_unk_exit_fin_last_success_index ON nodes(disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success);`,
			},
		},
	}
}
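All three new steps share one shape, and SeparateTx: true (as the name suggests) runs each CREATE INDEX in its own transaction rather than the shared migration transaction. A template for a future step, written as the same kind of literal the diff uses; the version number and SQL are placeholders, and the element type is assumed from context:

// Hypothetical follow-up step; only the field names come from the diff.
{
	DB:          &db.migrationDB,
	Description: "describe the schema change here",
	Version:     138, // next unused version number (placeholder)
	SeparateTx:  true, // run outside the shared migration transaction
	Action: migrate.SQL{
		`CREATE INDEX IF NOT EXISTS example_index ON example_table ( example_column );`,
	},
},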
@ -58,66 +58,6 @@ func (db *ordersDB) CreateSerialInfo(ctx context.Context, serialNumber storj.Ser
	)
}

// DeleteExpiredSerials deletes all expired serials in the serial_number table.
func (db *ordersDB) DeleteExpiredSerials(ctx context.Context, now time.Time, options orders.SerialDeleteOptions) (_ int, err error) {
	defer mon.Task()(&ctx)(&err)

	if db.db.implementation == dbutil.Cockroach && options.BatchSize > 0 {
		var deleted int

		for {
			d, err := func() (_ int, err error) {
				var r int
				rs, err := db.db.Query(ctx, "DELETE FROM serial_numbers WHERE expires_at <= $1 ORDER BY expires_at DESC LIMIT $2 RETURNING expires_at;", now.UTC(), options.BatchSize)
				if err != nil {
					return 0, err
				}
				defer func() { err = errs.Combine(err, rs.Close()) }()

				for rs.Next() {
					err = rs.Scan(&now)
					if err != nil {
						return r, err
					}
					r++
				}
				if rs.Err() != nil {
					return r, rs.Err()
				}

				return r, nil
			}()
			deleted += d
			if err != nil {
				return deleted, err
			}

			if d < options.BatchSize {
				break
			}
		}

		return deleted, err
	}

	count, err := db.db.Delete_SerialNumber_By_ExpiresAt_LessOrEqual(ctx, dbx.SerialNumber_ExpiresAt(now.UTC()))
	if err != nil {
		return 0, err
	}
	return int(count), nil
}

// DeleteExpiredConsumedSerials deletes all expired serials in the consumed_serials table.
func (db *ordersDB) DeleteExpiredConsumedSerials(ctx context.Context, now time.Time) (_ int, err error) {
	defer mon.Task()(&ctx, now)(&err)

	count, err := db.db.Delete_ConsumedSerial_By_ExpiresAt_LessOrEqual(ctx, dbx.ConsumedSerial_ExpiresAt(now))
	if err != nil {
		return 0, err
	}
	return int(count), nil
}

// UseSerialNumber creates a used serial number entry in database from an
// existing serial number.
// It returns the bucket ID associated to serialNumber.
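The removed Cockroach branch illustrates a useful batching pattern: delete at most BatchSize rows per statement, feed the last scanned expires_at back in as the next bound, and stop once a batch comes back short, so no single transaction has to touch the whole table. A stripped-down sketch of the same idea against a generic *sql.DB; Postgres has no DELETE ... LIMIT, so the limit moves into a subquery, and every name beyond the serial_numbers table is an assumption:

package main

import (
	"context"
	"database/sql"
	"time"
)

// deleteExpiredInBatches removes expired rows a batch at a time.
// Sketch only; the storj code above uses CockroachDB's native
// DELETE ... ORDER BY ... LIMIT ... RETURNING instead.
func deleteExpiredInBatches(ctx context.Context, db *sql.DB, now time.Time, batchSize int) (deleted int, err error) {
	for {
		res, err := db.ExecContext(ctx, `
			DELETE FROM serial_numbers
			WHERE id IN (
				SELECT id FROM serial_numbers
				WHERE expires_at <= $1
				ORDER BY expires_at DESC
				LIMIT $2
			)`, now.UTC(), batchSize)
		if err != nil {
			return deleted, err
		}
		n, err := res.RowsAffected()
		if err != nil {
			return deleted, err
		}
		deleted += int(n)
		if int(n) < batchSize {
			return deleted, nil // short batch: no expired rows remain
		}
	}
}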
@ -101,9 +101,9 @@ func (db *StoragenodeAccounting) GetTalliesSince(ctx context.Context, latestRoll
	return out, Error.Wrap(err)
}

func (db *StoragenodeAccounting) getNodeIds(ctx context.Context) (nodeids [][]byte, err error) {
func (db *StoragenodeAccounting) getNodeIdsSince(ctx context.Context, since time.Time) (nodeids [][]byte, err error) {
	defer mon.Task()(&ctx)(&err)
	rows, err := db.db.QueryContext(ctx, db.db.Rebind(`select distinct storagenode_id from storagenode_bandwidth_rollups`))
	rows, err := db.db.QueryContext(ctx, db.db.Rebind(`select distinct storagenode_id from storagenode_bandwidth_rollups where interval_start >= $1`), since)
	if err != nil {
		return nil, Error.Wrap(err)
	}
@ -215,7 +215,7 @@ func (db *StoragenodeAccounting) GetBandwidthSince(ctx context.Context, latestRo
	// going to allow us to avoid 16 minute queries.
	var nodeids [][]byte
	for {
		nodeids, err = db.getNodeIds(ctx)
		nodeids, err = db.getNodeIdsSince(ctx, latestRollup)
		if err != nil {
			if cockroachutil.NeedsRetry(err) {
				continue
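The rename to getNodeIdsSince is the point of the change: bounding the DISTINCT scan with interval_start >= $1 lets it use the storagenode_bandwidth_rollups_interval_start_index added in migration 135 instead of reading every rollup row. A minimal sketch of the narrowed query against plain database/sql (the DB wrapper types in the diff are storj-specific):

package main

import (
	"context"
	"database/sql"
	"time"
)

// nodeIDsSince returns the distinct node IDs that have bandwidth rollups
// newer than the given time, mirroring the query in the diff above.
func nodeIDsSince(ctx context.Context, db *sql.DB, since time.Time) (ids [][]byte, err error) {
	rows, err := db.QueryContext(ctx, `
		SELECT DISTINCT storagenode_id
		FROM storagenode_bandwidth_rollups
		WHERE interval_start >= $1`, since)
	if err != nil {
		return nil, err
	}
	defer func() {
		if closeErr := rows.Close(); err == nil {
			err = closeErr
		}
	}()
	for rows.Next() {
		var id []byte
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, rows.Err()
}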
576
satellite/satellitedb/testdata/postgres.v135.sql
vendored
Normal file
@ -0,0 +1,576 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
	node_id bytea NOT NULL,
	start_time timestamp with time zone NOT NULL,
	put_total bigint NOT NULL,
	get_total bigint NOT NULL,
	get_audit_total bigint NOT NULL,
	get_repair_total bigint NOT NULL,
	put_repair_total bigint NOT NULL,
	at_rest_total double precision NOT NULL,
	PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
	name text NOT NULL,
	value timestamp with time zone NOT NULL,
	PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
	node_id bytea NOT NULL,
	history bytea NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
	bucket_name bytea NOT NULL,
	project_id bytea NOT NULL,
	interval_start timestamp with time zone NOT NULL,
	interval_seconds integer NOT NULL,
	action integer NOT NULL,
	inline bigint NOT NULL,
	allocated bigint NOT NULL,
	settled bigint NOT NULL,
	PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
	bucket_name bytea NOT NULL,
	project_id bytea NOT NULL,
	interval_start timestamp with time zone NOT NULL,
	inline bigint NOT NULL,
	remote bigint NOT NULL,
	remote_segments_count integer NOT NULL,
	inline_segments_count integer NOT NULL,
	object_count integer NOT NULL,
	metadata_size bigint NOT NULL,
	PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
	id text NOT NULL,
	user_id bytea NOT NULL,
	address text NOT NULL,
	amount bytea NOT NULL,
	received bytea NOT NULL,
	status integer NOT NULL,
	key text NOT NULL,
	timeout integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
	storage_node_id bytea NOT NULL,
	serial_number bytea NOT NULL,
	expires_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
	id bytea NOT NULL,
	user_id bytea NOT NULL,
	amount bigint NOT NULL,
	description text NOT NULL,
	type integer NOT NULL,
	status integer NOT NULL,
	duration bigint NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
	coupon_id bytea NOT NULL,
	amount bigint NOT NULL,
	status integer NOT NULL,
	period timestamp with time zone NOT NULL,
	PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
	node_id bytea NOT NULL,
	bytes_transferred bigint NOT NULL,
	pieces_transferred bigint NOT NULL DEFAULT 0,
	pieces_failed bigint NOT NULL DEFAULT 0,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
	node_id bytea NOT NULL,
	path bytea NOT NULL,
	piece_num integer NOT NULL,
	root_piece_id bytea,
	durability_ratio double precision NOT NULL,
	queued_at timestamp with time zone NOT NULL,
	requested_at timestamp with time zone,
	last_failed_at timestamp with time zone,
	last_failed_code integer,
	failed_count integer,
	finished_at timestamp with time zone,
	order_limit_send_count integer NOT NULL DEFAULT 0,
	PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
	path bytea NOT NULL,
	data bytea NOT NULL,
	attempted timestamp with time zone,
	updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
	segment_health double precision NOT NULL DEFAULT 1,
	PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
	segmentpath bytea NOT NULL,
	segmentdetail bytea NOT NULL,
	pieces_lost_count bigint NOT NULL,
	seg_damaged_unix_sec bigint NOT NULL,
	repair_attempt_count bigint NOT NULL,
	PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
	id bytea NOT NULL,
	address text NOT NULL DEFAULT '',
	last_net text NOT NULL,
	last_ip_port text,
	protocol integer NOT NULL DEFAULT 0,
	type integer NOT NULL DEFAULT 0,
	email text NOT NULL,
	wallet text NOT NULL,
	free_disk bigint NOT NULL DEFAULT -1,
	piece_count bigint NOT NULL DEFAULT 0,
	major bigint NOT NULL DEFAULT 0,
	minor bigint NOT NULL DEFAULT 0,
	patch bigint NOT NULL DEFAULT 0,
	hash text NOT NULL DEFAULT '',
	timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
	release boolean NOT NULL DEFAULT false,
	latency_90 bigint NOT NULL DEFAULT 0,
	audit_success_count bigint NOT NULL DEFAULT 0,
	total_audit_count bigint NOT NULL DEFAULT 0,
	vetted_at timestamp with time zone,
	uptime_success_count bigint NOT NULL,
	total_uptime_count bigint NOT NULL,
	created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
	updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
	last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
	last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
	contained boolean NOT NULL DEFAULT false,
	disqualified timestamp with time zone,
	suspended timestamp with time zone,
	unknown_audit_suspended timestamp with time zone,
	offline_suspended timestamp with time zone,
	under_review timestamp with time zone,
	online_score double precision NOT NULL DEFAULT 1,
	audit_reputation_alpha double precision NOT NULL DEFAULT 1,
	audit_reputation_beta double precision NOT NULL DEFAULT 0,
	unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
	unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
	uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
	uptime_reputation_beta double precision NOT NULL DEFAULT 0,
	exit_initiated_at timestamp with time zone,
	exit_loop_completed_at timestamp with time zone,
	exit_finished_at timestamp with time zone,
	exit_success boolean NOT NULL DEFAULT false,
	PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
	id bytea NOT NULL,
	api_version integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
	node_id bytea NOT NULL,
	tracked_at timestamp with time zone NOT NULL,
	seconds integer NOT NULL,
	PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
	id serial NOT NULL,
	name text NOT NULL,
	description text NOT NULL,
	award_credit_in_cents integer NOT NULL DEFAULT 0,
	invitee_credit_in_cents integer NOT NULL DEFAULT 0,
	award_credit_duration_days integer,
	invitee_credit_duration_days integer,
	redeemable_cap integer,
	expires_at timestamp with time zone NOT NULL,
	created_at timestamp with time zone NOT NULL,
	status integer NOT NULL,
	type integer NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
	node_id bytea NOT NULL,
	leaf_serial_number bytea NOT NULL,
	chain bytea NOT NULL,
	updated_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
	node_id bytea NOT NULL,
	piece_id bytea NOT NULL,
	stripe_index bigint NOT NULL,
	share_size bigint NOT NULL,
	expected_share_hash bytea NOT NULL,
	reverify_count bigint NOT NULL,
	path bytea NOT NULL,
	PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
	storage_node_id bytea NOT NULL,
	bucket_id bytea NOT NULL,
	serial_number bytea NOT NULL,
	action integer NOT NULL,
	settled bigint NOT NULL,
	expires_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
	id bytea NOT NULL,
	name text NOT NULL,
	description text NOT NULL,
	usage_limit bigint,
	bandwidth_limit bigint,
	rate_limit integer,
	max_buckets integer,
	partner_id bytea,
	owner_id bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
	project_id bytea NOT NULL,
	interval_month date NOT NULL,
	egress_allocated bigint NOT NULL,
	PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
	secret bytea NOT NULL,
	owner_id bytea,
	project_limit integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( secret ),
	UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
	expires_at timestamp with time zone NOT NULL,
	storage_node_id bytea NOT NULL,
	bucket_id bytea NOT NULL,
	action integer NOT NULL,
	serial_number bytea NOT NULL,
	settled bigint NOT NULL,
	observed_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
	secret bytea NOT NULL,
	owner_id bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( secret ),
	UNIQUE ( owner_id )
);
CREATE TABLE revocations (
	revoked bytea NOT NULL,
	api_key_id bytea NOT NULL,
	PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
	id serial NOT NULL,
	serial_number bytea NOT NULL,
	bucket_id bytea NOT NULL,
	expires_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
	storagenode_id bytea NOT NULL,
	interval_start timestamp with time zone NOT NULL,
	interval_seconds integer NOT NULL,
	action integer NOT NULL,
	allocated bigint DEFAULT 0,
	settled bigint NOT NULL,
	PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
	storagenode_id bytea NOT NULL,
	interval_start timestamp with time zone NOT NULL,
	interval_seconds integer NOT NULL,
	action integer NOT NULL,
	allocated bigint DEFAULT 0,
	settled bigint NOT NULL,
	PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
	id bigserial NOT NULL,
	created_at timestamp with time zone NOT NULL,
	node_id bytea NOT NULL,
	period text NOT NULL,
	amount bigint NOT NULL,
	receipt text,
	notes text,
	PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
	period text NOT NULL,
	node_id bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	codes text NOT NULL,
	usage_at_rest double precision NOT NULL,
	usage_get bigint NOT NULL,
	usage_put bigint NOT NULL,
	usage_get_repair bigint NOT NULL,
	usage_put_repair bigint NOT NULL,
	usage_get_audit bigint NOT NULL,
	comp_at_rest bigint NOT NULL,
	comp_get bigint NOT NULL,
	comp_put bigint NOT NULL,
	comp_get_repair bigint NOT NULL,
	comp_put_repair bigint NOT NULL,
	comp_get_audit bigint NOT NULL,
	surge_percent bigint NOT NULL,
	held bigint NOT NULL,
	owed bigint NOT NULL,
	disposed bigint NOT NULL,
	paid bigint NOT NULL,
	PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
	node_id bytea NOT NULL,
	interval_end_time timestamp with time zone NOT NULL,
	data_total double precision NOT NULL,
	PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
	user_id bytea NOT NULL,
	customer_id text NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( user_id ),
	UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
	id bytea NOT NULL,
	project_id bytea NOT NULL,
	storage double precision NOT NULL,
	egress bigint NOT NULL,
	objects bigint NOT NULL,
	period_start timestamp with time zone NOT NULL,
	period_end timestamp with time zone NOT NULL,
	state integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id ),
	UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
	tx_id text NOT NULL,
	rate bytea NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
	id bytea NOT NULL,
	email text NOT NULL,
	normalized_email text NOT NULL,
	full_name text NOT NULL,
	short_name text,
	password_hash bytea NOT NULL,
	status integer NOT NULL,
	partner_id bytea,
	created_at timestamp with time zone NOT NULL,
	project_limit integer NOT NULL DEFAULT 0,
	PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
	project_id bytea NOT NULL,
	bucket_name bytea NOT NULL,
	partner_id bytea NOT NULL,
	last_updated timestamp with time zone NOT NULL,
	PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
	id bytea NOT NULL,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	head bytea NOT NULL,
	name text NOT NULL,
	secret bytea NOT NULL,
	partner_id bytea,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id ),
	UNIQUE ( head ),
	UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
	id bytea NOT NULL,
	project_id bytea NOT NULL REFERENCES projects( id ),
	name bytea NOT NULL,
	partner_id bytea,
	path_cipher integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	default_segment_size integer NOT NULL,
	default_encryption_cipher_suite integer NOT NULL,
	default_encryption_block_size integer NOT NULL,
	default_redundancy_algorithm integer NOT NULL,
	default_redundancy_share_size integer NOT NULL,
	default_redundancy_required_shares integer NOT NULL,
	default_redundancy_repair_shares integer NOT NULL,
	default_redundancy_optimal_shares integer NOT NULL,
	default_redundancy_total_shares integer NOT NULL,
	PRIMARY KEY ( id ),
	UNIQUE ( name, project_id ),
	UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
	member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
	tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
	state integer NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
	serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
	storage_node_id bytea NOT NULL,
	PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
	id serial NOT NULL,
	user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	offer_id integer NOT NULL REFERENCES offers( id ),
	referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
	type text NOT NULL,
	credits_earned_in_cents integer NOT NULL,
	credits_used_in_cents integer NOT NULL,
	expires_at timestamp with time zone NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( id ),
	UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );

INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
|
||||
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
|
||||
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
|
||||
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
|
||||
|
||||
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
|
||||
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
||||
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
|
||||
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
|
||||
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
|
||||
|
||||
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
|
||||
|
||||
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
|
||||
|
||||
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
|
||||
|
||||
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
|
||||
|
||||
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
|
||||
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
|
||||
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
|
||||
|
||||
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
|
||||
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
|
||||
|
||||
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
|
||||
|
||||
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');

INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');

INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');

INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');

INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');

INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);

INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);

577 satellite/satellitedb/testdata/postgres.v136.sql vendored Normal file
@@ -0,0 +1,577 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
    node_id bytea NOT NULL,
    start_time timestamp with time zone NOT NULL,
    put_total bigint NOT NULL,
    get_total bigint NOT NULL,
    get_audit_total bigint NOT NULL,
    get_repair_total bigint NOT NULL,
    put_repair_total bigint NOT NULL,
    at_rest_total double precision NOT NULL,
    PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
    name text NOT NULL,
    value timestamp with time zone NOT NULL,
    PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
    node_id bytea NOT NULL,
    history bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    inline bigint NOT NULL,
    remote bigint NOT NULL,
    remote_segments_count integer NOT NULL,
    inline_segments_count integer NOT NULL,
    object_count integer NOT NULL,
    metadata_size bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
    id text NOT NULL,
    user_id bytea NOT NULL,
    address text NOT NULL,
    amount bytea NOT NULL,
    received bytea NOT NULL,
    status integer NOT NULL,
    key text NOT NULL,
    timeout integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
    storage_node_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
    id bytea NOT NULL,
    user_id bytea NOT NULL,
    amount bigint NOT NULL,
    description text NOT NULL,
    type integer NOT NULL,
    status integer NOT NULL,
    duration bigint NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
    coupon_id bytea NOT NULL,
    amount bigint NOT NULL,
    status integer NOT NULL,
    period timestamp with time zone NOT NULL,
    PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
    node_id bytea NOT NULL,
    bytes_transferred bigint NOT NULL,
    pieces_transferred bigint NOT NULL DEFAULT 0,
    pieces_failed bigint NOT NULL DEFAULT 0,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
    node_id bytea NOT NULL,
    path bytea NOT NULL,
    piece_num integer NOT NULL,
    root_piece_id bytea,
    durability_ratio double precision NOT NULL,
    queued_at timestamp with time zone NOT NULL,
    requested_at timestamp with time zone,
    last_failed_at timestamp with time zone,
    last_failed_code integer,
    failed_count integer,
    finished_at timestamp with time zone,
    order_limit_send_count integer NOT NULL DEFAULT 0,
    PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
    path bytea NOT NULL,
    data bytea NOT NULL,
    attempted timestamp with time zone,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    segment_health double precision NOT NULL DEFAULT 1,
    PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
    segmentpath bytea NOT NULL,
    segmentdetail bytea NOT NULL,
    pieces_lost_count bigint NOT NULL,
    seg_damaged_unix_sec bigint NOT NULL,
    repair_attempt_count bigint NOT NULL,
    PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
    id bytea NOT NULL,
    address text NOT NULL DEFAULT '',
    last_net text NOT NULL,
    last_ip_port text,
    protocol integer NOT NULL DEFAULT 0,
    type integer NOT NULL DEFAULT 0,
    email text NOT NULL,
    wallet text NOT NULL,
    free_disk bigint NOT NULL DEFAULT -1,
    piece_count bigint NOT NULL DEFAULT 0,
    major bigint NOT NULL DEFAULT 0,
    minor bigint NOT NULL DEFAULT 0,
    patch bigint NOT NULL DEFAULT 0,
    hash text NOT NULL DEFAULT '',
    timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
    release boolean NOT NULL DEFAULT false,
    latency_90 bigint NOT NULL DEFAULT 0,
    audit_success_count bigint NOT NULL DEFAULT 0,
    total_audit_count bigint NOT NULL DEFAULT 0,
    vetted_at timestamp with time zone,
    uptime_success_count bigint NOT NULL,
    total_uptime_count bigint NOT NULL,
    created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
    last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
    contained boolean NOT NULL DEFAULT false,
    disqualified timestamp with time zone,
    suspended timestamp with time zone,
    unknown_audit_suspended timestamp with time zone,
    offline_suspended timestamp with time zone,
    under_review timestamp with time zone,
    online_score double precision NOT NULL DEFAULT 1,
    audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    audit_reputation_beta double precision NOT NULL DEFAULT 0,
    unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
    uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
    uptime_reputation_beta double precision NOT NULL DEFAULT 0,
    exit_initiated_at timestamp with time zone,
    exit_loop_completed_at timestamp with time zone,
    exit_finished_at timestamp with time zone,
    exit_success boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
    id bytea NOT NULL,
    api_version integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
    node_id bytea NOT NULL,
    tracked_at timestamp with time zone NOT NULL,
    seconds integer NOT NULL,
    PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
    id serial NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    award_credit_in_cents integer NOT NULL DEFAULT 0,
    invitee_credit_in_cents integer NOT NULL DEFAULT 0,
    award_credit_duration_days integer,
    invitee_credit_duration_days integer,
    redeemable_cap integer,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    status integer NOT NULL,
    type integer NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
    node_id bytea NOT NULL,
    leaf_serial_number bytea NOT NULL,
    chain bytea NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
    node_id bytea NOT NULL,
    piece_id bytea NOT NULL,
    stripe_index bigint NOT NULL,
    share_size bigint NOT NULL,
    expected_share_hash bytea NOT NULL,
    reverify_count bigint NOT NULL,
    path bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
    storage_node_id bytea NOT NULL,
    bucket_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    action integer NOT NULL,
    settled bigint NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
    id bytea NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    usage_limit bigint,
    bandwidth_limit bigint,
    rate_limit integer,
    max_buckets integer,
    partner_id bytea,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
    project_id bytea NOT NULL,
    interval_month date NOT NULL,
    egress_allocated bigint NOT NULL,
    PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
    secret bytea NOT NULL,
    owner_id bytea,
    project_limit integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
    expires_at timestamp with time zone NOT NULL,
    storage_node_id bytea NOT NULL,
    bucket_id bytea NOT NULL,
    action integer NOT NULL,
    serial_number bytea NOT NULL,
    settled bigint NOT NULL,
    observed_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
    secret bytea NOT NULL,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE revocations (
    revoked bytea NOT NULL,
    api_key_id bytea NOT NULL,
    PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
    id serial NOT NULL,
    serial_number bytea NOT NULL,
    bucket_id bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
    id bigserial NOT NULL,
    created_at timestamp with time zone NOT NULL,
    node_id bytea NOT NULL,
    period text NOT NULL,
    amount bigint NOT NULL,
    receipt text,
    notes text,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
    period text NOT NULL,
    node_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    codes text NOT NULL,
    usage_at_rest double precision NOT NULL,
    usage_get bigint NOT NULL,
    usage_put bigint NOT NULL,
    usage_get_repair bigint NOT NULL,
    usage_put_repair bigint NOT NULL,
    usage_get_audit bigint NOT NULL,
    comp_at_rest bigint NOT NULL,
    comp_get bigint NOT NULL,
    comp_put bigint NOT NULL,
    comp_get_repair bigint NOT NULL,
    comp_put_repair bigint NOT NULL,
    comp_get_audit bigint NOT NULL,
    surge_percent bigint NOT NULL,
    held bigint NOT NULL,
    owed bigint NOT NULL,
    disposed bigint NOT NULL,
    paid bigint NOT NULL,
    PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
    node_id bytea NOT NULL,
    interval_end_time timestamp with time zone NOT NULL,
    data_total double precision NOT NULL,
    PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
    user_id bytea NOT NULL,
    customer_id text NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( user_id ),
    UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
    id bytea NOT NULL,
    project_id bytea NOT NULL,
    storage double precision NOT NULL,
    egress bigint NOT NULL,
    objects bigint NOT NULL,
    period_start timestamp with time zone NOT NULL,
    period_end timestamp with time zone NOT NULL,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
    tx_id text NOT NULL,
    rate bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
    id bytea NOT NULL,
    email text NOT NULL,
    normalized_email text NOT NULL,
    full_name text NOT NULL,
    short_name text,
    password_hash bytea NOT NULL,
    status integer NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    project_limit integer NOT NULL DEFAULT 0,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
    project_id bytea NOT NULL,
    bucket_name bytea NOT NULL,
    partner_id bytea NOT NULL,
    last_updated timestamp with time zone NOT NULL,
    PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    head bytea NOT NULL,
    name text NOT NULL,
    secret bytea NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( head ),
    UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ),
    name bytea NOT NULL,
    partner_id bytea,
    path_cipher integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    default_segment_size integer NOT NULL,
    default_encryption_cipher_suite integer NOT NULL,
    default_encryption_block_size integer NOT NULL,
    default_redundancy_algorithm integer NOT NULL,
    default_redundancy_share_size integer NOT NULL,
    default_redundancy_required_shares integer NOT NULL,
    default_redundancy_repair_shares integer NOT NULL,
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( name, project_id ),
    UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
    member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
    tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
    serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
    storage_node_id bytea NOT NULL,
    PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
    id serial NOT NULL,
    user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    offer_id integer NOT NULL REFERENCES offers( id ),
    referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
    type text NOT NULL,
    credits_earned_in_cents integer NOT NULL,
    credits_used_in_cents integer NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies (project_id);
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );

INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);

INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');

INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);

INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');

INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);

INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');

INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');

INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);

INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);

INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');

INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);

INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');

INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');

INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');

INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);

INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');

INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');

INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);

INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');

INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);

INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');

INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);

INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');

INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');

INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);

INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);

INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');

INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');

INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');

INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');

INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');

INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');

INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');

INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');

INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');

INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);

INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);

INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);

578 satellite/satellitedb/testdata/postgres.v137.sql vendored Normal file
@@ -0,0 +1,578 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
    node_id bytea NOT NULL,
    start_time timestamp with time zone NOT NULL,
    put_total bigint NOT NULL,
    get_total bigint NOT NULL,
    get_audit_total bigint NOT NULL,
    get_repair_total bigint NOT NULL,
    put_repair_total bigint NOT NULL,
    at_rest_total double precision NOT NULL,
    PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
    name text NOT NULL,
    value timestamp with time zone NOT NULL,
    PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
    node_id bytea NOT NULL,
    history bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    inline bigint NOT NULL,
    remote bigint NOT NULL,
    remote_segments_count integer NOT NULL,
    inline_segments_count integer NOT NULL,
    object_count integer NOT NULL,
    metadata_size bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
    id text NOT NULL,
    user_id bytea NOT NULL,
    address text NOT NULL,
    amount bytea NOT NULL,
    received bytea NOT NULL,
    status integer NOT NULL,
    key text NOT NULL,
    timeout integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
    storage_node_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
    id bytea NOT NULL,
    user_id bytea NOT NULL,
    amount bigint NOT NULL,
    description text NOT NULL,
    type integer NOT NULL,
    status integer NOT NULL,
    duration bigint NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
    coupon_id bytea NOT NULL,
    amount bigint NOT NULL,
    status integer NOT NULL,
    period timestamp with time zone NOT NULL,
    PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
    node_id bytea NOT NULL,
    bytes_transferred bigint NOT NULL,
    pieces_transferred bigint NOT NULL DEFAULT 0,
    pieces_failed bigint NOT NULL DEFAULT 0,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
    node_id bytea NOT NULL,
    path bytea NOT NULL,
    piece_num integer NOT NULL,
    root_piece_id bytea,
    durability_ratio double precision NOT NULL,
    queued_at timestamp with time zone NOT NULL,
    requested_at timestamp with time zone,
    last_failed_at timestamp with time zone,
    last_failed_code integer,
    failed_count integer,
    finished_at timestamp with time zone,
    order_limit_send_count integer NOT NULL DEFAULT 0,
    PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
    path bytea NOT NULL,
    data bytea NOT NULL,
    attempted timestamp with time zone,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    segment_health double precision NOT NULL DEFAULT 1,
    PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
    segmentpath bytea NOT NULL,
    segmentdetail bytea NOT NULL,
    pieces_lost_count bigint NOT NULL,
    seg_damaged_unix_sec bigint NOT NULL,
    repair_attempt_count bigint NOT NULL,
    PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
    id bytea NOT NULL,
    address text NOT NULL DEFAULT '',
    last_net text NOT NULL,
    last_ip_port text,
    protocol integer NOT NULL DEFAULT 0,
    type integer NOT NULL DEFAULT 0,
    email text NOT NULL,
    wallet text NOT NULL,
    free_disk bigint NOT NULL DEFAULT -1,
    piece_count bigint NOT NULL DEFAULT 0,
    major bigint NOT NULL DEFAULT 0,
    minor bigint NOT NULL DEFAULT 0,
    patch bigint NOT NULL DEFAULT 0,
    hash text NOT NULL DEFAULT '',
    timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
    release boolean NOT NULL DEFAULT false,
    latency_90 bigint NOT NULL DEFAULT 0,
    audit_success_count bigint NOT NULL DEFAULT 0,
    total_audit_count bigint NOT NULL DEFAULT 0,
    vetted_at timestamp with time zone,
    uptime_success_count bigint NOT NULL,
    total_uptime_count bigint NOT NULL,
    created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
    last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
    contained boolean NOT NULL DEFAULT false,
    disqualified timestamp with time zone,
    suspended timestamp with time zone,
    unknown_audit_suspended timestamp with time zone,
    offline_suspended timestamp with time zone,
    under_review timestamp with time zone,
    online_score double precision NOT NULL DEFAULT 1,
    audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    audit_reputation_beta double precision NOT NULL DEFAULT 0,
    unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
    uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
    uptime_reputation_beta double precision NOT NULL DEFAULT 0,
    exit_initiated_at timestamp with time zone,
    exit_loop_completed_at timestamp with time zone,
    exit_finished_at timestamp with time zone,
    exit_success boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
|
||||
CREATE TABLE node_api_versions (
|
||||
id bytea NOT NULL,
|
||||
api_version integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE nodes_offline_times (
|
||||
node_id bytea NOT NULL,
|
||||
tracked_at timestamp with time zone NOT NULL,
|
||||
seconds integer NOT NULL,
|
||||
PRIMARY KEY ( node_id, tracked_at )
|
||||
);
|
||||
CREATE TABLE offers (
|
||||
id serial NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
award_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
award_credit_duration_days integer,
|
||||
invitee_credit_duration_days integer,
|
||||
redeemable_cap integer,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
status integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE peer_identities (
|
||||
node_id bytea NOT NULL,
|
||||
leaf_serial_number bytea NOT NULL,
|
||||
chain bytea NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_audits (
|
||||
node_id bytea NOT NULL,
|
||||
piece_id bytea NOT NULL,
|
||||
stripe_index bigint NOT NULL,
|
||||
share_size bigint NOT NULL,
|
||||
expected_share_hash bytea NOT NULL,
|
||||
reverify_count bigint NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_serial_queue (
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
usage_limit bigint,
|
||||
bandwidth_limit bigint,
|
||||
rate_limit integer,
|
||||
max_buckets integer,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE project_bandwidth_rollups (
|
||||
project_id bytea NOT NULL,
|
||||
interval_month date NOT NULL,
|
||||
egress_allocated bigint NOT NULL,
|
||||
PRIMARY KEY ( project_id, interval_month )
|
||||
);
|
||||
CREATE TABLE registration_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea,
|
||||
project_limit integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reported_serials (
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
observed_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE revocations (
|
||||
revoked bytea NOT NULL,
|
||||
api_key_id bytea NOT NULL,
|
||||
PRIMARY KEY ( revoked )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_payments (
|
||||
id bigserial NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
period text NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
receipt text,
|
||||
notes text,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_paystubs (
|
||||
period text NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
codes text NOT NULL,
|
||||
usage_at_rest double precision NOT NULL,
|
||||
usage_get bigint NOT NULL,
|
||||
usage_put bigint NOT NULL,
|
||||
usage_get_repair bigint NOT NULL,
|
||||
usage_put_repair bigint NOT NULL,
|
||||
usage_get_audit bigint NOT NULL,
|
||||
comp_at_rest bigint NOT NULL,
|
||||
comp_get bigint NOT NULL,
|
||||
comp_put bigint NOT NULL,
|
||||
comp_get_repair bigint NOT NULL,
|
||||
comp_put_repair bigint NOT NULL,
|
||||
comp_get_audit bigint NOT NULL,
|
||||
surge_percent bigint NOT NULL,
|
||||
held bigint NOT NULL,
|
||||
owed bigint NOT NULL,
|
||||
disposed bigint NOT NULL,
|
||||
paid bigint NOT NULL,
|
||||
PRIMARY KEY ( period, node_id )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
node_id bytea NOT NULL,
|
||||
interval_end_time timestamp with time zone NOT NULL,
|
||||
data_total double precision NOT NULL,
|
||||
PRIMARY KEY ( interval_end_time, node_id )
|
||||
);
|
||||
CREATE TABLE stripe_customers (
|
||||
user_id bytea NOT NULL,
|
||||
customer_id text NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( user_id ),
|
||||
UNIQUE ( customer_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_invoice_project_records (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
storage double precision NOT NULL,
|
||||
egress bigint NOT NULL,
|
||||
objects bigint NOT NULL,
|
||||
period_start timestamp with time zone NOT NULL,
|
||||
period_end timestamp with time zone NOT NULL,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( project_id, period_start, period_end )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
||||
tx_id text NOT NULL,
|
||||
rate bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE users (
|
||||
id bytea NOT NULL,
|
||||
email text NOT NULL,
|
||||
normalized_email text NOT NULL,
|
||||
full_name text NOT NULL,
|
||||
short_name text,
|
||||
password_hash bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
project_limit integer NOT NULL DEFAULT 0,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE value_attributions (
|
||||
project_id bytea NOT NULL,
|
||||
bucket_name bytea NOT NULL,
|
||||
partner_id bytea NOT NULL,
|
||||
last_updated timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( project_id, bucket_name )
|
||||
);
|
||||
CREATE TABLE api_keys (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
head bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
secret bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( head ),
|
||||
UNIQUE ( name, project_id )
|
||||
);
|
||||
CREATE TABLE bucket_metainfos (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ),
|
||||
name bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
path_cipher integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
default_segment_size integer NOT NULL,
|
||||
default_encryption_cipher_suite integer NOT NULL,
|
||||
default_encryption_block_size integer NOT NULL,
|
||||
default_redundancy_algorithm integer NOT NULL,
|
||||
default_redundancy_share_size integer NOT NULL,
|
||||
default_redundancy_required_shares integer NOT NULL,
|
||||
default_redundancy_repair_shares integer NOT NULL,
|
||||
default_redundancy_optimal_shares integer NOT NULL,
|
||||
default_redundancy_total_shares integer NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( name, project_id ),
|
||||
UNIQUE ( project_id, name )
|
||||
);
|
||||
CREATE TABLE project_members (
|
||||
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( member_id, project_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE TABLE user_credits (
|
||||
id serial NOT NULL,
|
||||
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
offer_id integer NOT NULL REFERENCES offers( id ),
|
||||
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
||||
type text NOT NULL,
|
||||
credits_earned_in_cents integer NOT NULL,
|
||||
credits_used_in_cents integer NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( id, offer_id )
|
||||
);
|
||||
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
||||
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies (project_id);
|
||||
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
|
||||
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
|
||||
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes(disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success);
|
||||
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
|
||||
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
|
||||
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
||||
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
||||
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
||||
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
|
||||
|
||||
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
|
||||
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
|
||||
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
|
||||
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
|
||||
|
||||
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
|
||||
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
||||
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
|
||||
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
|
||||
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
|
||||
|
||||
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
|
||||
|
||||
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
|
||||
|
||||
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
|
||||
|
||||
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
|
||||
|
||||
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
|
||||
|
||||
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
|
||||
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
|
||||
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
|
||||
|
||||
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
|
||||
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
|
||||
|
||||
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
|
||||
|
||||
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
|
||||
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
|
||||
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
|
||||
|
||||
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
|
||||
|
||||
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
|
||||
|
||||
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
|
||||
|
||||
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
|
||||
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
|
||||
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
@ -9,7 +9,7 @@ for v in alpha arm beta latest; do
docker manifest create --amend storjlabs/storagenode:$v \
    storjlabs/storagenode:${TAG}-amd64 \
    storjlabs/storagenode:${TAG}-arm32v6 \
    storjlabs/storagenode:${TAG}-aarch64
    storjlabs/storagenode:${TAG}-arm64v8

docker manifest annotate storjlabs/storagenode:$v \
    storjlabs/storagenode:${TAG}-amd64 --os linux --arch amd64
@ -18,7 +18,7 @@ for v in alpha arm beta latest; do
    storjlabs/storagenode:${TAG}-arm32v6 --os linux --arch arm --variant v6

docker manifest annotate storjlabs/storagenode:$v \
    storjlabs/storagenode:${TAG}-aarch64 --os linux --arch arm64
    storjlabs/storagenode:${TAG}-arm64v8 --os linux --arch arm64 --variant v8

docker manifest push --purge storjlabs/storagenode:$v
done
scripts/testdata/satellite-config.yaml.lock (vendored)
@ -160,12 +160,6 @@ contact.external-address: ""

# macaroon revocation cache expiration
# database-options.revocations-cache.expiration: 5m0s

# number of records to delete per delete execution. 0 indicates no limit. applicable to cockroach DB only.
# db-cleanup.batch-size: 1000

# how often to delete expired serial numbers
# db-cleanup.serials-interval: 4h0m0s

# Maximum Database Connection Lifetime, -1ns means the stdlib default
# db.conn_max_lifetime: 30m0s

@ -424,9 +418,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key

# how often to flush the rollups write cache to the database
# orders.flush-interval: 1m0s

# include encrypted metadata in the order limit
# orders.include-encrypted-metadata: false

# how many concurrent orders to process at once. zero is unlimited
# orders.orders-semaphore-size: 2
web/multinode/package-lock.json (generated)
@ -13123,6 +13123,11 @@
    "resolved": "https://registry.npmjs.org/vue-property-decorator/-/vue-property-decorator-9.1.2.tgz",
    "integrity": "sha512-xYA8MkZynPBGd/w5QFJ2d/NM0z/YeegMqYTphy7NJQXbZcuU6FC6AOdUAcy4SXP+YnkerC6AfH+ldg7PDk9ESQ=="
},
"vue-router": {
    "version": "3.4.9",
    "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.4.9.tgz",
    "integrity": "sha512-CGAKWN44RqXW06oC+u4mPgHLQQi2t6vLD/JbGRDAXm0YpMv0bgpKuU5bBd7AvMgfTz9kXVRIWKHqRwGEb8xFkA=="
},
"vue-style-loader": {
    "version": "4.1.2",
    "resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.2.tgz",
@ -11,7 +11,8 @@
"dependencies": {
    "vue": "2.6.11",
    "vue-class-component": "7.2.6",
    "vue-property-decorator": "9.1.2"
    "vue-property-decorator": "9.1.2",
    "vue-router": "3.4.9"
},
"devDependencies": {
    "@babel/core": "7.12.10",
@ -3,6 +3,7 @@

<template>
    <div id="app">
        <router-view/>
    </div>
</template>
web/multinode/src/app/router/index.ts (new file)
@ -0,0 +1,55 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

import Router, { RouterMode } from 'vue-router';
import { Component } from 'vue-router/types/router';

import Dashboard from '@/app/views/Dashboard.vue';

/**
 * Metadata holds arbitrary information for routes, like transition names, who can access the route, etc.
 */
class Metadata {
    public requiresAuth: boolean;
}

/**
 * Route holds all needed information to fill up the router config.
 */
export class Route {
    public readonly path: string;
    public readonly name: string;
    public readonly component: Component;
    public readonly children?: Route[];
    public readonly meta?: Metadata;

    /**
     * default constructor.
     * @param path - route path.
     * @param name - name of the route, needed for identifying the route by name.
     * @param component - component mapped to route.
     * @param children - all nested components of current route.
     * @param meta - arbitrary information for routes, like transition names, who can access the route, etc.
     */
    public constructor(path: string, name: string, component: Component, children: Route[] | undefined = undefined, meta: Metadata | undefined = undefined) {
        this.path = path;
        this.name = name;
        this.component = component;
        this.children = children;
        this.meta = meta;
    }
}

/**
 * Config contains configuration of all available routes for a Multinode Dashboard router.
 */
export class Config {
    public static Root: Route = new Route('/', 'Root', Dashboard, undefined, {requiresAuth: true});

    public mode: RouterMode = 'history';
    public static routes: Route[] = [
        Config.Root,
    ];
}

export const router = new Router(Config);
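For orientation: nothing in this file enforces the requiresAuth metadata yet. A minimal sketch of how such a flag is typically enforced with a vue-router global guard — the isLoggedIn check and the '/login' path are hypothetical placeholders, not part of this commit:

// Hypothetical guard; isLoggedIn() and '/login' are placeholders, not in this commit.
const isLoggedIn = (): boolean => false; // stand-in for a real auth check

router.beforeEach((to, from, next) => {
    // A route needs auth if any matched record carries meta.requiresAuth.
    const needsAuth = to.matched.some((record) => record.meta && record.meta.requiresAuth);
    if (needsAuth && !isLoggedIn()) {
        next('/login');
        return;
    }
    next();
});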
web/multinode/src/app/views/Dashboard.vue (new file)
@ -0,0 +1,15 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

<template>
    <div>
    </div>
</template>

<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';

@Component
export default class Dashboard extends Vue {}
</script>
@ -2,13 +2,18 @@
// See LICENSE for copying information.

import Vue from 'vue';
import Router from 'vue-router';

import App from '@/app/App.vue';
import { router } from '@/app/router';

Vue.config.productionTip = false;

new Vue({
    // TODO: add router,
Vue.use(Router);

const app = new Vue({
    router,
    render: (h) => h(App),
    // TODO: add store,
}).$mount('#app');
});

app.$mount('#app');
@ -1,4 +1,4 @@
FROM node:10.15.1 as satellite-ui
FROM node:14.15.3 as satellite-ui

WORKDIR /app
COPY web/satellite/ /app
@ -85,7 +85,7 @@ export default class CLIStep extends Vue {
     * Copies token to clipboard.
     */
    public onCopyClick(): void {
        this.$copyText(this.key);
        this.$copyText(this.restrictedKey);
        this.$notify.success('Token was copied successfully');
    }
@ -4,7 +4,7 @@
<template>
    <div class="disk-stat-area">
        <p class="disk-stat-area__title">Total Disk Space</p>
        <p class="disk-stat-area__amount">{{ total }}</p>
        <p class="disk-stat-area__amount">{{ diskSpace.available | bytesToBase10String }}</p>
        <DoughnutChart class="disk-stat-area__chart" :chart-data="chartData" />
        <div class="disk-stat-area__info-area">
            <div class="disk-stat-area__info-area__item">
@ -12,21 +12,28 @@
                    <div class="disk-stat-area__info-area__item__labels-area__circle used"></div>
                    <p class="disk-stat-area__info-area__item__labels-area__label">Used</p>
                </div>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ used }}</p>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ diskSpace.used | bytesToBase10String }}</p>
            </div>
            <div class="disk-stat-area__info-area__item">
                <div class="disk-stat-area__info-area__item__labels-area">
                    <div class="disk-stat-area__info-area__item__labels-area__circle free"></div>
                    <p class="disk-stat-area__info-area__item__labels-area__label">Free</p>
                </div>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ free }}</p>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ free | bytesToBase10String }}</p>
            </div>
            <div class="disk-stat-area__info-area__item">
                <div class="disk-stat-area__info-area__item__labels-area">
                    <div class="disk-stat-area__info-area__item__labels-area__circle trash"></div>
                    <p class="disk-stat-area__info-area__item__labels-area__label">Trash</p>
                </div>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ trash }}</p>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ diskSpace.trash | bytesToBase10String }}</p>
            </div>
            <div class="disk-stat-area__info-area__item">
                <div class="disk-stat-area__info-area__item__labels-area">
                    <div class="disk-stat-area__info-area__item__labels-area__circle overused"></div>
                    <p class="disk-stat-area__info-area__item__labels-area__label">Overused</p>
                </div>
                <p class="disk-stat-area__info-area__item__labels-area__amount">{{ diskSpace.overused | bytesToBase10String }}</p>
            </div>
        </div>
    </div>
@ -41,7 +48,7 @@ import {
    DiskStatChartData,
    DiskStatDataSet,
} from '@/app/types/chartData';
import { formatBytes } from '@/app/utils/converter';
import { Traffic } from '@/storagenode/sno/sno';

@Component({
    components: {
@ -53,52 +60,36 @@ export default class DiskStatChart extends Vue {
     * Holds datasets for chart.
     */
    public get chartData(): DiskStatChartData {
        const diskSpace = this.$store.state.node.utilization.diskSpace;
        const free = diskSpace.available - diskSpace.used - diskSpace.trash;

        return new DiskStatChartData([
            new DiskStatDataSet(
                '',
                ['#D6D6D6', '#0059D0', '#8FA7C6'],
                ['#D6D6D6', '#0059D0', '#8FA7C6', '#2582FF'],
                [
                    free,
                    diskSpace.used,
                    diskSpace.trash,
                    this.free,
                    this.diskSpace.used,
                    this.diskSpace.trash,
                    this.diskSpace.overused,
                ],
            ),
        ]);
    }

    /**
     * Returns formatted used disk space amount.
     * Returns disk space information from store.
     */
    public get total(): string {
        return formatBytes(this.$store.state.node.utilization.diskSpace.available);
    public get diskSpace(): Traffic {
        return this.$store.state.node.utilization.diskSpace;
    }

    /**
     * Returns formatted used disk space amount.
     * Returns free disk space amount.
     */
    public get used(): string {
        return formatBytes(this.$store.state.node.utilization.diskSpace.used);
    }
    public get free(): number {
        let free = this.diskSpace.available - this.diskSpace.used - this.diskSpace.trash;

    /**
     * Returns formatted available disk space amount.
     */
    public get free(): string {
        return formatBytes(
            this.$store.state.node.utilization.diskSpace.available -
            this.$store.state.node.utilization.diskSpace.used -
            this.$store.state.node.utilization.diskSpace.trash,
        );
    }
        if (free < 0) free = 0;

    /**
     * Returns formatted trash disk space amount.
     */
    public get trash(): string {
        return formatBytes(this.$store.state.node.utilization.diskSpace.trash);
        return free;
    }
}
</script>
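The rewritten free getter above folds the old formatBytes arithmetic into a single clamped number, so a node whose overused space pushes used + trash past available can never render a negative free amount; formatting now happens in the templates via the bytesToBase10String filter. The clamp in isolation, as a plain-function sketch — the DiskSpaceLike shape is assumed here, mirroring the Traffic fields used above:

// Standalone sketch of the clamped free-space calculation used in the getter above.
interface DiskSpaceLike {
    used: number;
    available: number;
    trash: number;
}

function freeSpace(disk: DiskSpaceLike): number {
    const free = disk.available - disk.used - disk.trash;
    return free < 0 ? 0 : free; // clamp: an overused node reports 0 free, not a negative number
}

// e.g. freeSpace({ available: 100, used: 90, trash: 20 }) === 0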
@ -111,7 +102,7 @@ export default class DiskStatChart extends Vue {
    background-color: var(--block-background-color);
    border: 1px solid var(--block-border-color);
    border-radius: 11px;
    padding: 32px 30px;
    padding: 32px 20px;
    position: relative;

    &__title {
@ -130,24 +121,25 @@ export default class DiskStatChart extends Vue {

    &__chart {
        position: absolute;
        left: 30px;
        width: calc(50% - 30px);
        width: calc(58% - 25px);
        height: 220px;
        margin-top: 35px;
        top: 135px;
    }

    &__info-area {
        position: absolute;
        right: 30px;
        top: 57%;
        top: 60%;
        transform: translateY(-50%);
        width: calc(50% - 50px);
        width: calc(40% - 35px);
        display: flex;
        flex-direction: column;
        box-sizing: border-box;

        &__item {
            display: flex;
            justify-content: space-between;
            flex-direction: column;
            margin-top: 19px;

            &:first-of-type {
@ -156,6 +148,7 @@ export default class DiskStatChart extends Vue {

            &__labels-area {
                display: flex;
                align-items: center;

                &__circle {
                    width: 14px;
@ -175,6 +168,8 @@ export default class DiskStatChart extends Vue {
                font-weight: bold;
                font-size: 14px;
                color: var(--disk-stat-chart-text-color);
                margin-left: 22px;
                margin-top: 6px;
            }
        }
    }
@ -193,6 +188,10 @@ export default class DiskStatChart extends Vue {
    background: #8fa7c6;
}

.overused {
    background: #2582ff;
}

@media screen and (max-width: 1000px) {

    .disk-stat-area {
@ -202,13 +201,20 @@ export default class DiskStatChart extends Vue {
            width: 250px;
            height: 250px;
            margin-left: 100px;
            margin-top: 0;
            top: 100px;
        }

        &__info-area {
            top: 60%;
            right: 120px;
            width: 185px;

            &__item {
                flex-direction: row;

                &__labels-area__amount {
                    margin: 0;
                }
            }
        }
    }
}
@ -222,9 +228,16 @@ export default class DiskStatChart extends Vue {
        }

        &__info-area {
            top: 60%;
            right: 90px;
            width: 140px;

            &__item {
                flex-direction: row;

                &__labels-area__amount {
                    margin: 0;
                }
            }
        }
    }
}
@ -234,13 +247,14 @@ export default class DiskStatChart extends Vue {
    .disk-stat-area {

        &__chart {
            top: 125px;
            width: 200px;
            height: 200px;
            margin-left: 50px;
        }

        &__info-area {
            top: 50%;
            top: 55%;
            right: 90px;
            width: 140px;
        }
@ -255,6 +269,7 @@ export default class DiskStatChart extends Vue {
        padding: 24px 18px;

        &__chart {
            top: 100px;
            width: 200px;
            height: 200px;
            left: 50%;
@ -268,6 +283,7 @@ export default class DiskStatChart extends Vue {
            transform: translateX(50%);
            bottom: 10px;
            height: 100px;
            width: 200px;
        }
    }
}
@ -60,8 +60,8 @@ export function newNodeModule(service: StorageNodeService): StoreModule<StorageN
|
||||
);
|
||||
|
||||
state.utilization = new Utilization(
|
||||
new Traffic(nodeInfo.bandwidth.used),
|
||||
new Traffic(nodeInfo.diskSpace.used, nodeInfo.diskSpace.available, nodeInfo.diskSpace.trash),
|
||||
nodeInfo.bandwidth,
|
||||
nodeInfo.diskSpace,
|
||||
);
|
||||
|
||||
state.disqualifiedSatellites = nodeInfo.satellites.filter((satellite: SatelliteInfo) => satellite.disqualified);
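
Note: forwarding `nodeInfo.bandwidth` and `nodeInfo.diskSpace` as-is matters here. The removed code re-wrapped them in fresh `Traffic` instances without the fourth argument, which would silently reset `overused` to its default. A self-contained check (class shape copied from the sno.ts diff below):

class Traffic {
    public constructor(
        public used: number = 0,
        public available: number = 1,
        public trash: number = 0,
        public overused: number = 0,
    ) {}
}

const fromApi = new Traffic(550000, 1000000, 22000, 11000);
// The removed store code passed only three arguments when re-wrapping:
const rewrapped = new Traffic(fromApi.used, fromApi.available, fromApi.trash);
console.log(rewrapped.overused); // 0 — the overused amount would be lost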
@ -8,6 +8,7 @@ import { DirectiveBinding } from 'vue/types/options';
import App from '@/app/App.vue';
import { router } from '@/app/router';
import { store } from '@/app/store';
import { formatBytes } from '@/app/utils/converter';

Vue.config.productionTip = false;
VueClipboard.config.autoSetContainer = true;
@ -22,6 +23,7 @@ let clickOutsideEvent: EventListener;
Vue.directive('click-outside', {
bind: function (el: HTMLElement, binding: DirectiveBinding, vnode: VNode) {
clickOutsideEvent = function(event: Event): void {
// TODO: improve and test this
if (el === event.target) {
return;
}
@ -45,6 +47,14 @@ Vue.filter('centsToDollars', (cents: number): string => {
return `$${(cents / 100).toFixed(2)}`;
});

/**
* Converts bytes to base-10 types.
*/
Vue.filter('bytesToBase10String', (amountInBytes: number): string => {
// TODO: move to Size package
return `${formatBytes(amountInBytes)}`;
});

new Vue({
router,
render: (h) => h(App),
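
Note: the new filter simply delegates to `formatBytes`, whose implementation is not part of this diff. A hypothetical base-10 formatter consistent with the snapshot values further down (0 → "0 Bytes", 11000 → "11KB") would look roughly like this:

// Sketch only; the real formatBytes lives in '@/app/utils/converter'.
function formatBytesSketch(amount: number): string {
    if (amount === 0) return '0 Bytes';
    const units = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
    const i = Math.min(Math.floor(Math.log10(amount) / 3), units.length - 1);
    const value = parseFloat((amount / Math.pow(1000, i)).toFixed(2));
    // The snapshots show a space only before "Bytes" ("0 Bytes" vs "11KB").
    return i === 0 ? `${value} ${units[i]}` : `${value}${units[i]}`;
}

console.log(formatBytesSketch(22000)); // "22KB"
console.log(formatBytesSketch(11000)); // "11KB"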
@ -42,7 +42,7 @@ export class StorageNodeApi {
return new SatelliteInfo(satellite.id, satellite.url, disqualified, suspended);
});

const diskSpace: Traffic = new Traffic(data.diskSpace.used, data.diskSpace.available, data.diskSpace.trash);
const diskSpace: Traffic = new Traffic(data.diskSpace.used, data.diskSpace.available, data.diskSpace.trash, data.diskSpace.overused);
const bandwidth: Traffic = new Traffic(data.bandwidth.used);

return new Dashboard(data.nodeID, data.wallet, satellites, diskSpace, bandwidth,

@ -37,6 +37,7 @@ export class Traffic {
public used: number = 0,
public available: number = 1,
public trash: number = 0,
public overused: number = 0,
) {}
}
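
Note: `overused` defaults to 0, so every existing `Traffic` call site keeps compiling unchanged, e.g. `new Traffic(data.bandwidth.used)` above; `available` defaulting to 1 rather than 0 presumably keeps any usage-percentage math safe from division by zero. A quick standalone check (class redeclared so the snippet runs on its own):

class Traffic {
    public constructor(
        public used: number = 0,
        public available: number = 1,
        public trash: number = 0,
        public overused: number = 0,
    ) {}
}

const bandwidth = new Traffic(50);                       // one-argument call site still valid
const disk = new Traffic(550000, 1000000, 22000, 11000); // full shape from the API
console.log(bandwidth.overused, disk.overused);          // 0 11000
console.log((disk.used / disk.available) * 100);         // 55 — safe even for new Traffic()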
@ -6,6 +6,7 @@ import Vuex from 'vuex';
import DiskStatChart from '@/app/components/DiskStatChart.vue';

import { newNodeModule, NODE_ACTIONS } from '@/app/store/modules/node';
import { formatBytes } from '@/app/utils/converter';
import { StorageNodeApi } from '@/storagenode/api/storagenode';
import { StorageNodeService } from '@/storagenode/sno/service';
import { Dashboard, SatelliteInfo, Traffic } from '@/storagenode/sno/sno';
@ -14,6 +15,10 @@ import { createLocalVue, shallowMount } from '@vue/test-utils';
const localVue = createLocalVue();
localVue.use(Vuex);

localVue.filter('bytesToBase10String', (amountInBytes: number): string => {
return `${formatBytes(amountInBytes)}`;
});

const nodeApi = new StorageNodeApi();
const nodeService = new StorageNodeService(nodeApi);
const nodeModule = newNodeModule(nodeService);
@ -45,7 +50,7 @@ describe('DiskStatChart', (): void => {
new SatelliteInfo('3', 'url1', null, null),
new SatelliteInfo('4', 'url2', new Date(2020, 1, 1), new Date(2020, 0, 1)),
],
new Traffic(550000, 1000000, 22000),
new Traffic(550000, 1000000, 22000, 11000),
new Traffic(50),
new Date(),
new Date(2019, 3, 1),
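
Note: the spec registers `bytesToBase10String` on its own `localVue` because filters added via `Vue.filter` in main.ts never reach the tests: the app entry point is not imported, and `createLocalVue()` starts from a clean constructor, so without the registration the template's `| bytesToBase10String` pipe would be unresolved. A sketch of the mount that depends on it (the Vuex store shape and the `node` module name are assumptions; option names per @vue/test-utils v1):

const store = new Vuex.Store({ modules: { node: nodeModule } });
const wrapper = shallowMount(DiskStatChart, {
    store,
    localVue, // carries the bytesToBase10String filter registered above
});
expect(wrapper).toMatchSnapshot();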
@ -27,6 +27,13 @@ exports[`DiskStatChart renders correctly 1`] = `
</div>
<p class="disk-stat-area__info-area__item__labels-area__amount">0 Bytes</p>
</div>
<div class="disk-stat-area__info-area__item">
<div class="disk-stat-area__info-area__item__labels-area">
<div class="disk-stat-area__info-area__item__labels-area__circle overused"></div>
<p class="disk-stat-area__info-area__item__labels-area__label">Overused</p>
</div>
<p class="disk-stat-area__info-area__item__labels-area__amount">0 Bytes</p>
</div>
</div>
</div>
`;
@ -58,6 +65,13 @@ exports[`DiskStatChart renders correctly with actual values 1`] = `
</div>
<p class="disk-stat-area__info-area__item__labels-area__amount">22KB</p>
</div>
<div class="disk-stat-area__info-area__item">
<div class="disk-stat-area__info-area__item__labels-area">
<div class="disk-stat-area__info-area__item__labels-area__circle overused"></div>
<p class="disk-stat-area__info-area__item__labels-area__label">Overused</p>
</div>
<p class="disk-stat-area__info-area__item__labels-area__amount">11KB</p>
</div>
</div>
</div>
`;