satellite: Check macaroon revocation
When a request comes in on the satellite API and we validate the macaroon, we now also check whether any of the macaroon's tails have been revoked.

Change-Id: I80ce4312602baf431cfa1b1285f79bed88bb4497
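The change wires a revocation check into API key validation on the satellite. Below is a minimal, self-contained sketch of the idea only; the names `revocations`, `validate` and `inMemory` are illustrative and not from this change, and the real wiring goes through `key.Check` and the `revocation.DB` interface added further down.

package main

import (
	"context"
	"errors"
	"fmt"
)

// revocations mirrors the Check half of the revocation store introduced in this
// change: given every tail of a macaroon, report whether any has been revoked.
type revocations interface {
	Check(ctx context.Context, tails [][]byte) (bool, error)
}

// validate sketches the extra step performed while validating an API key: after
// the usual caveat checks, the macaroon's tails are looked up in the store.
func validate(ctx context.Context, tails [][]byte, store revocations) error {
	revoked, err := store.Check(ctx, tails)
	if err != nil {
		return err
	}
	if revoked {
		// the satellite surfaces this as rpcstatus.PermissionDenied
		return errors.New("Unauthorized API credentials")
	}
	return nil
}

// inMemory is a toy store for the example; the real implementation is the
// database-backed revocationDB further down in this diff.
type inMemory map[string]bool

func (m inMemory) Check(ctx context.Context, tails [][]byte) (bool, error) {
	for _, tail := range tails {
		if m[string(tail)] {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	store := inMemory{"revoked-tail": true}
	tails := [][]byte{[]byte("head-tail"), []byte("revoked-tail")}
	fmt.Println(validate(context.Background(), tails, store)) // Unauthorized API credentials
}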
commit 2d727bb14e
parent 433fc91054
@@ -32,6 +32,7 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
 
 	db, err := satellitedb.New(log.Named("db"), runCfg.Database, satellitedb.Options{
 		APIKeysLRUOptions: runCfg.APIKeysLRUOptions(),
+		RevocationLRUOptions: runCfg.RevocationLRUOptions(),
 	})
 	if err != nil {
 		return errs.New("Error starting master database on satellite api: %+v", err)
@@ -46,6 +46,10 @@ type Satellite struct {
 			Expiration time.Duration `help:"satellite database api key expiration" default:"60s"`
 			Capacity   int           `help:"satellite database api key lru capacity" default:"1000"`
 		}
+		RevocationsCache struct {
+			Expiration time.Duration `help:"macaroon revocation cache expiration" default:"5m"`
+			Capacity   int           `help:"macaroon revocation cache capacity" default:"10000"`
+		}
 	}
 
 	satellite.Config
@@ -59,6 +63,14 @@ func (s *Satellite) APIKeysLRUOptions() cache.Options {
 	}
 }
 
+// RevocationLRUOptions returns a cache.Options based on the Revocations LRU config
+func (s *Satellite) RevocationLRUOptions() cache.Options {
+	return cache.Options{
+		Expiration: s.DatabaseOptions.RevocationsCache.Expiration,
+		Capacity:   s.DatabaseOptions.RevocationsCache.Capacity,
+	}
+}
+
 var (
 	rootCmd = &cobra.Command{
 		Use: "satellite",
go.sum (3 changes)
@@ -620,7 +620,10 @@ storj.io/common v0.0.0-20200611114417-9a3d012fdb62/go.mod h1:6S6Ub92/BB+ofU7hbyP
 storj.io/common v0.0.0-20200616122322-79b46deca70e h1:eAq23qVHALvUhH1PrtHnRPUXo7uobeh21G1DU5scz6k=
 storj.io/common v0.0.0-20200616122322-79b46deca70e/go.mod h1:ZSjZI9XJNevOP527K+PL1c68j8w5M4vbHha3V2tYWdQ=
 storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
 storj.io/drpc v0.0.12 h1:4ei1M4cnWlYxcQheX0Dg4+c12zCD+oJqfweVQVWarsA=
 storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
 storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BXzT2UYnYCBLqNi6i84Y8QIPUs=
 storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
@@ -418,6 +418,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 			peer.Accounting.ProjectUsage,
 			peer.DB.Console().Projects(),
 			signing.SignerFromFullIdentity(peer.Identity),
+			peer.DB.Revocation(),
 			config.Metainfo,
 		)
 		if err != nil {
@@ -33,6 +33,7 @@ import (
 	"storj.io/storj/satellite/metainfo/pointerverification"
 	"storj.io/storj/satellite/orders"
 	"storj.io/storj/satellite/overlay"
+	"storj.io/storj/satellite/revocation"
 	"storj.io/storj/satellite/rewards"
 	"storj.io/uplink/private/eestream"
 	"storj.io/uplink/private/storage/meta"
@@ -61,13 +62,6 @@ type APIKeys interface {
 	GetByHead(ctx context.Context, head []byte) (*console.APIKeyInfo, error)
 }
 
-// Revocations is the revocations store methods used by the endpoint
-//
-// architecture: Database
-type Revocations interface {
-	GetByProjectID(ctx context.Context, projectID uuid.UUID) ([][]byte, error)
-}
-
 // Endpoint metainfo endpoint.
 //
 // architecture: Endpoint
@@ -87,6 +81,7 @@ type Endpoint struct {
 	satellite            signing.Signer
 	limiterCache         *lrucache.ExpiringLRU
 	encInlineSegmentSize int64 // max inline segment size + encryption overhead
+	revocations          revocation.DB
 	config               Config
 }
 
@@ -95,7 +90,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
 	orders *orders.Service, cache *overlay.Service, attributions attribution.DB,
 	partners *rewards.PartnersService, peerIdentities overlay.PeerIdentities,
 	apiKeys APIKeys, projectUsage *accounting.Service, projects console.Projects,
-	satellite signing.Signer, config Config) (*Endpoint, error) {
+	satellite signing.Signer, revocations revocation.DB, config Config) (*Endpoint, error) {
 	// TODO do something with too many params
 
 	encInlineSegmentSize, err := encryption.CalcEncryptedSize(config.MaxInlineSegmentSize.Int64(), storj.EncryptionParameters{
@@ -124,6 +119,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
 			Expiration: config.RateLimiter.CacheExpiration,
 		}),
 		encInlineSegmentSize: encInlineSegmentSize,
+		revocations:          revocations,
 		config:               config,
 	}, nil
 }
@@ -35,6 +35,114 @@ import (
 	"storj.io/uplink/private/testuplink"
 )
 
+func TestRevokedMacaroon(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+
+		// I want the api key for the single satellite in this test
+		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
+
+		client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
+		require.NoError(t, err)
+		defer ctx.Check(client.Close)
+
+		// Sanity check: it should work before revoke
+		_, err = client.ListBuckets(ctx, metainfo.ListBucketsParams{
+			ListOpts: storj.BucketListOptions{
+				Cursor:    "",
+				Direction: storj.Forward,
+				Limit:     10,
+			},
+		})
+		require.NoError(t, err)
+
+		err = planet.Satellites[0].API.DB.Revocation().Revoke(ctx, apiKey.Tail(), []byte("apikey"))
+		require.NoError(t, err)
+
+		_, err = client.ListBuckets(ctx, metainfo.ListBucketsParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.BeginObject(ctx, metainfo.BeginObjectParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, err = client.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.ListBuckets(ctx, metainfo.ListBucketsParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, err = client.ListObjects(ctx, metainfo.ListObjectsParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.CreateBucket(ctx, metainfo.CreateBucketParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.DeleteBucket(ctx, metainfo.DeleteBucketParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, err = client.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.GetBucket(ctx, metainfo.GetBucketParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.GetObject(ctx, metainfo.GetObjectParams{})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, err = client.GetProjectInfo(ctx)
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
+		satStreamID := &pb.SatStreamID{
+			CreationDate: time.Now(),
+		}
+		signedStreamID, err := signing.SignStreamID(ctx, signer, satStreamID)
+		require.NoError(t, err)
+
+		encodedStreamID, err := pb.Marshal(signedStreamID)
+		require.NoError(t, err)
+
+		err = client.CommitObject(ctx, metainfo.CommitObjectParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		err = client.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, _, err = client.BeginSegment(ctx, metainfo.BeginSegmentParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, _, err = client.BeginDeleteSegment(ctx, metainfo.BeginDeleteSegmentParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		err = client.MakeInlineSegment(ctx, metainfo.MakeInlineSegmentParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, err = client.ListSegments(ctx, metainfo.ListSegmentsParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		_, _, err = client.DownloadSegment(ctx, metainfo.DownloadSegmentParams{StreamID: encodedStreamID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+
+		// these methods needs SegmentID
+
+		signedSegmentID, err := signing.SignSegmentID(ctx, signer, &pb.SatSegmentID{
+			StreamId:     satStreamID,
+			CreationDate: time.Now(),
+		})
+		require.NoError(t, err)
+
+		encodedSegmentID, err := pb.Marshal(signedSegmentID)
+		require.NoError(t, err)
+
+		segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
+		require.NoError(t, err)
+
+		err = client.CommitSegment(ctx, metainfo.CommitSegmentParams{SegmentID: segmentID})
+		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
+	})
+}
+
 func TestInvalidAPIKey(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
@@ -61,9 +169,6 @@ func TestInvalidAPIKey(t *testing.T) {
 		_, _, err = client.ListObjects(ctx, metainfo.ListObjectsParams{})
 		assertInvalidArgument(t, err, false)
 
-		err = client.CommitObject(ctx, metainfo.CommitObjectParams{})
-		assertInvalidArgument(t, err, false)
-
 		_, err = client.CreateBucket(ctx, metainfo.CreateBucketParams{})
 		assertInvalidArgument(t, err, false)
 
@@ -73,9 +178,6 @@ func TestInvalidAPIKey(t *testing.T) {
 		_, _, err = client.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{})
 		assertInvalidArgument(t, err, false)
 
-		err = client.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{})
-		assertInvalidArgument(t, err, false)
-
 		_, err = client.GetBucket(ctx, metainfo.GetBucketParams{})
 		assertInvalidArgument(t, err, false)
 
@@ -100,6 +202,12 @@ func TestInvalidAPIKey(t *testing.T) {
 		streamID, err := storj.StreamIDFromBytes(encodedStreamID)
 		require.NoError(t, err)
 
+		err = client.CommitObject(ctx, metainfo.CommitObjectParams{StreamID: streamID})
+		assertInvalidArgument(t, err, false)
+
+		err = client.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{StreamID: streamID})
+		assertInvalidArgument(t, err, false)
+
 		_, _, _, err = client.BeginSegment(ctx, metainfo.BeginSegmentParams{StreamID: streamID})
 		assertInvalidArgument(t, err, false)
 
@@ -155,8 +155,7 @@ func (endpoint *Endpoint) validateAuth(ctx context.Context, header *pb.RequestHe
 		return nil, err
 	}
 
-	// Revocations are currently handled by just deleting the key.
-	err = key.Check(ctx, keyInfo.Secret, action, nil)
+	err = key.Check(ctx, keyInfo.Secret, action, endpoint.revocations)
 	if err != nil {
 		endpoint.log.Debug("unauthorized request", zap.Error(err))
 		return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
@@ -43,6 +43,7 @@ import (
 	"storj.io/storj/satellite/repair/irreparable"
 	"storj.io/storj/satellite/repair/queue"
 	"storj.io/storj/satellite/repair/repairer"
+	"storj.io/storj/satellite/revocation"
 	"storj.io/storj/satellite/rewards"
 )
 
@@ -96,6 +97,8 @@ type DB interface {
 	HeldAmount() heldamount.DB
 	// Compoensation tracks storage node compensation
 	Compensation() compensation.DB
+	// Revocation tracks revoked macaroons
+	Revocation() revocation.DB
 }
 
 // Config is the global config satellite
satellite/revocation/revocation.go (new file, 14 lines)
@@ -0,0 +1,14 @@
+// Copyright (C) 2020 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package revocation
+
+import "context"
+
+// DB is the interface for a revocation DB
+type DB interface {
+	// Revoke is the method to revoke the supplied tail
+	Revoke(ctx context.Context, tail []byte, apiKeyID []byte) error
+	// Check will check whether any of the supplied tails have been revoked
+	Check(ctx context.Context, tails [][]byte) (bool, error)
+}
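For illustration, a hypothetical in-memory implementation of this interface (not part of this change; the real one is the database-backed revocationDB in satellite/satellitedb/revocation.go further down) could look like this:

package revocationtest // hypothetical package, for illustration only

import (
	"context"
	"sync"
)

// MemDB satisfies the same method set as revocation.DB using a mutex-guarded
// map. Useful as a mental model (or a test double) for the interface above.
type MemDB struct {
	mu      sync.Mutex
	revoked map[string][]byte // tail -> api key id
}

// Revoke records the supplied tail as revoked.
func (db *MemDB) Revoke(ctx context.Context, tail []byte, apiKeyID []byte) error {
	db.mu.Lock()
	defer db.mu.Unlock()
	if db.revoked == nil {
		db.revoked = make(map[string][]byte)
	}
	db.revoked[string(tail)] = apiKeyID
	return nil
}

// Check reports whether any of the supplied tails has been revoked.
func (db *MemDB) Check(ctx context.Context, tails [][]byte) (bool, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	for _, tail := range tails {
		if _, ok := db.revoked[string(tail)]; ok {
			return true, nil
		}
	}
	return false, nil
}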
@@ -26,6 +26,7 @@ import (
 	"storj.io/storj/satellite/payments/stripecoinpayments"
 	"storj.io/storj/satellite/repair/irreparable"
 	"storj.io/storj/satellite/repair/queue"
+	"storj.io/storj/satellite/revocation"
 	"storj.io/storj/satellite/rewards"
 	"storj.io/storj/satellite/satellitedb/dbx"
 )
@@ -48,11 +49,15 @@ type satelliteDB struct {
 
 	consoleDBOnce sync.Once
 	consoleDB     *ConsoleDB
+
+	revocationDBOnce sync.Once
+	revocationDB     *revocationDB
 }
 
 // Options includes options for how a satelliteDB runs
 type Options struct {
 	APIKeysLRUOptions    cache.Options
+	RevocationLRUOptions cache.Options
 
 	// How many records to read in a single transaction when asked for all of the
 	// billable bandwidth from the reported serials table.
@@ -133,6 +138,18 @@ func (db *satelliteDB) Irreparable() irreparable.DB {
 	return &irreparableDB{db: db}
 }
 
+// Revocation returns the database to deal with macaroon revocation
+func (db *satelliteDB) Revocation() revocation.DB {
+	db.revocationDBOnce.Do(func() {
+		db.revocationDB = &revocationDB{
+			db:      db,
+			lru:     cache.New(db.opts.RevocationLRUOptions),
+			methods: db,
+		}
+	})
+	return db.revocationDB
+}
+
 // Console returns database for storing users, projects and api keys
 func (db *satelliteDB) Console() console.DB {
 	db.consoleDBOnce.Do(func() {
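The Revocation() accessor above uses the same lazy, sync.Once-guarded construction as Console(), so the revocation LRU is built once and shared by every caller. A small self-contained sketch of that pattern (a generic illustration, not code from this change):

package main

import (
	"fmt"
	"sync"
)

// lazyDB builds its sub-database on first use and then always returns the same
// instance, which is what keeps a single shared LRU cache per process.
type lazyDB struct {
	once sync.Once
	sub  *subDB
}

type subDB struct{ name string }

func (db *lazyDB) Sub() *subDB {
	db.once.Do(func() {
		db.sub = &subDB{name: "constructed once"}
	})
	return db.sub
}

func main() {
	var db lazyDB
	fmt.Println(db.Sub() == db.Sub()) // true: the same instance every time
}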
@@ -569,6 +569,14 @@ read scalar (
 	where bucket_bandwidth_rollup.action = ?
 )
 
+model revocation (
+	key revoked
+	field revoked blob
+	field api_key_id blob
+)
+
+create revocation ( noreturn )
+
 model project_bandwidth_rollup (
 	key project_id interval_month
 
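The `create revocation ( noreturn )` directive makes dbx emit an insert method without a returning clause. Judging from the generated code further down in this diff, a caller uses it roughly like this (a fragment for illustration only, assuming a dbx.Methods value named `methods` is in scope):

// insert one revoked tail; mirrors revocationDB.Revoke in
// satellite/satellitedb/revocation.go below
err := methods.CreateNoReturn_Revocation(ctx,
	dbx.Revocation_Revoked(tail),
	dbx.Revocation_ApiKeyId(apiKeyID),
)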
@@ -266,6 +266,11 @@ CREATE TABLE reset_password_tokens (
 	PRIMARY KEY ( secret ),
 	UNIQUE ( owner_id )
 );
+CREATE TABLE revocations (
+	revoked bytea NOT NULL,
+	api_key_id bytea NOT NULL,
+	PRIMARY KEY ( revoked )
+);
 CREATE TABLE serial_numbers (
 	id serial NOT NULL,
 	serial_number bytea NOT NULL,
@@ -538,6 +538,11 @@ CREATE TABLE reset_password_tokens (
 	PRIMARY KEY ( secret ),
 	UNIQUE ( owner_id )
 );
+CREATE TABLE revocations (
+	revoked bytea NOT NULL,
+	api_key_id bytea NOT NULL,
+	PRIMARY KEY ( revoked )
+);
 CREATE TABLE serial_numbers (
 	id serial NOT NULL,
 	serial_number bytea NOT NULL,
@@ -1052,6 +1057,11 @@ CREATE TABLE reset_password_tokens (
 	PRIMARY KEY ( secret ),
 	UNIQUE ( owner_id )
 );
+CREATE TABLE revocations (
+	revoked bytea NOT NULL,
+	api_key_id bytea NOT NULL,
+	PRIMARY KEY ( revoked )
+);
 CREATE TABLE serial_numbers (
 	id serial NOT NULL,
 	serial_number bytea NOT NULL,
@@ -5752,6 +5762,54 @@ func (f ResetPasswordToken_CreatedAt_Field) value() interface{} {
 
 func (ResetPasswordToken_CreatedAt_Field) _Column() string { return "created_at" }
 
+type Revocation struct {
+	Revoked  []byte
+	ApiKeyId []byte
+}
+
+func (Revocation) _Table() string { return "revocations" }
+
+type Revocation_Update_Fields struct {
+}
+
+type Revocation_Revoked_Field struct {
+	_set   bool
+	_null  bool
+	_value []byte
+}
+
+func Revocation_Revoked(v []byte) Revocation_Revoked_Field {
+	return Revocation_Revoked_Field{_set: true, _value: v}
+}
+
+func (f Revocation_Revoked_Field) value() interface{} {
+	if !f._set || f._null {
+		return nil
+	}
+	return f._value
+}
+
+func (Revocation_Revoked_Field) _Column() string { return "revoked" }
+
+type Revocation_ApiKeyId_Field struct {
+	_set   bool
+	_null  bool
+	_value []byte
+}
+
+func Revocation_ApiKeyId(v []byte) Revocation_ApiKeyId_Field {
+	return Revocation_ApiKeyId_Field{_set: true, _value: v}
+}
+
+func (f Revocation_ApiKeyId_Field) value() interface{} {
+	if !f._set || f._null {
+		return nil
+	}
+	return f._value
+}
+
+func (Revocation_ApiKeyId_Field) _Column() string { return "api_key_id" }
+
 type SerialNumber struct {
 	Id           int
 	SerialNumber []byte
@@ -9333,6 +9391,30 @@ func (obj *postgresImpl) CreateNoReturn_ConsumedSerial(ctx context.Context,
 
 }
 
+func (obj *postgresImpl) CreateNoReturn_Revocation(ctx context.Context,
+	revocation_revoked Revocation_Revoked_Field,
+	revocation_api_key_id Revocation_ApiKeyId_Field) (
+	err error) {
+	defer mon.Task()(&ctx)(&err)
+	__revoked_val := revocation_revoked.value()
+	__api_key_id_val := revocation_api_key_id.value()
+
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO revocations ( revoked, api_key_id ) VALUES ( ?, ? )")
+
+	var __values []interface{}
+	__values = append(__values, __revoked_val, __api_key_id_val)
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
+	if err != nil {
+		return obj.makeErr(err)
+	}
+	return nil
+
+}
+
 func (obj *postgresImpl) CreateNoReturn_BucketStorageTally(ctx context.Context,
 	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
 	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
@@ -14562,6 +14644,16 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error)
 		return 0, obj.makeErr(err)
 	}
 
+	__count, err = __res.RowsAffected()
+	if err != nil {
+		return 0, obj.makeErr(err)
+	}
+	count += __count
+	__res, err = obj.driver.ExecContext(ctx, "DELETE FROM revocations;")
+	if err != nil {
+		return 0, obj.makeErr(err)
+	}
+
 	__count, err = __res.RowsAffected()
 	if err != nil {
 		return 0, obj.makeErr(err)
@@ -15476,6 +15568,30 @@ func (obj *cockroachImpl) CreateNoReturn_ConsumedSerial(ctx context.Context,
 
 }
 
+func (obj *cockroachImpl) CreateNoReturn_Revocation(ctx context.Context,
+	revocation_revoked Revocation_Revoked_Field,
+	revocation_api_key_id Revocation_ApiKeyId_Field) (
+	err error) {
+	defer mon.Task()(&ctx)(&err)
+	__revoked_val := revocation_revoked.value()
+	__api_key_id_val := revocation_api_key_id.value()
+
+	var __embed_stmt = __sqlbundle_Literal("INSERT INTO revocations ( revoked, api_key_id ) VALUES ( ?, ? )")
+
+	var __values []interface{}
+	__values = append(__values, __revoked_val, __api_key_id_val)
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
+	if err != nil {
+		return obj.makeErr(err)
+	}
+	return nil
+
+}
+
 func (obj *cockroachImpl) CreateNoReturn_BucketStorageTally(ctx context.Context,
 	bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
 	bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
@@ -20705,6 +20821,16 @@ func (obj *cockroachImpl) deleteAll(ctx context.Context) (count int64, err error
 		return 0, obj.makeErr(err)
 	}
 
+	__count, err = __res.RowsAffected()
+	if err != nil {
+		return 0, obj.makeErr(err)
+	}
+	count += __count
+	__res, err = obj.driver.ExecContext(ctx, "DELETE FROM revocations;")
+	if err != nil {
+		return 0, obj.makeErr(err)
+	}
+
 	__count, err = __res.RowsAffected()
 	if err != nil {
 		return 0, obj.makeErr(err)
@@ -21402,6 +21528,18 @@ func (rx *Rx) CreateNoReturn_PeerIdentity(ctx context.Context,
 
 }
 
+func (rx *Rx) CreateNoReturn_Revocation(ctx context.Context,
+	revocation_revoked Revocation_Revoked_Field,
+	revocation_api_key_id Revocation_ApiKeyId_Field) (
+	err error) {
+	var tx *Tx
+	if tx, err = rx.getTx(ctx); err != nil {
+		return
+	}
+	return tx.CreateNoReturn_Revocation(ctx, revocation_revoked, revocation_api_key_id)
+
+}
+
 func (rx *Rx) CreateNoReturn_SerialNumber(ctx context.Context,
 	serial_number_serial_number SerialNumber_SerialNumber_Field,
 	serial_number_bucket_id SerialNumber_BucketId_Field,
@@ -22973,6 +23111,11 @@ type Methods interface {
 		peer_identity_chain PeerIdentity_Chain_Field) (
 		err error)
 
+	CreateNoReturn_Revocation(ctx context.Context,
+		revocation_revoked Revocation_Revoked_Field,
+		revocation_api_key_id Revocation_ApiKeyId_Field) (
+		err error)
+
 	CreateNoReturn_SerialNumber(ctx context.Context,
 		serial_number_serial_number SerialNumber_SerialNumber_Field,
 		serial_number_bucket_id SerialNumber_BucketId_Field,
@@ -266,6 +266,11 @@ CREATE TABLE reset_password_tokens (
 	PRIMARY KEY ( secret ),
 	UNIQUE ( owner_id )
 );
+CREATE TABLE revocations (
+	revoked bytea NOT NULL,
+	api_key_id bytea NOT NULL,
+	PRIMARY KEY ( revoked )
+);
 CREATE TABLE serial_numbers (
 	id serial NOT NULL,
 	serial_number bytea NOT NULL,
@@ -1161,6 +1161,18 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
 				`ALTER TABLE nodes ADD COLUMN under_review TIMESTAMP WITH TIME ZONE;`,
 			},
 		},
+		{
+			DB:          db.DB,
+			Description: "add revocations database",
+			Version:     115,
+			Action: migrate.SQL{`
+				CREATE TABLE revocations (
+					revoked bytea NOT NULL,
+					api_key_id bytea NOT NULL,
+					PRIMARY KEY ( revoked )
+				);
+			`},
+		},
 	},
 }
 }
satellite/satellitedb/revocation.go (new file, 71 lines)
@@ -0,0 +1,71 @@
+// Copyright (C) 2020 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package satellitedb
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/zeebo/errs"
+
+	"storj.io/storj/pkg/cache"
+	"storj.io/storj/satellite/satellitedb/dbx"
+)
+
+type revocationDB struct {
+	db      *satelliteDB
+	lru     *cache.ExpiringLRU
+	methods dbx.Methods
+}
+
+// Revoke will revoke the supplied tail
+func (db *revocationDB) Revoke(ctx context.Context, tail []byte, apiKeyID []byte) error {
+	return errs.Wrap(db.methods.CreateNoReturn_Revocation(ctx, dbx.Revocation_Revoked(tail), dbx.Revocation_ApiKeyId(apiKeyID)))
+}
+
+// Check will check whether any of the supplied tails have been revoked
+func (db *revocationDB) Check(ctx context.Context, tails [][]byte) (bool, error) {
+	numTails := len(tails)
+	if numTails == 0 {
+		return false, errs.New("Empty list of tails")
+	}
+
+	finalTail := tails[numTails-1]
+
+	val, err := db.lru.Get(string(finalTail), func() (interface{}, error) {
+		const query = "select exists(select 1 from revocations where revoked in (%s))"
+
+		var (
+			tailQuery, comma string
+			tailsForQuery    = make([]interface{}, numTails)
+			revoked          bool
+		)
+
+		for i, tail := range tails {
+			if i == 1 {
+				comma = ","
+			}
+			tailQuery += fmt.Sprintf("%s$%d", comma, i+1)
+			tailsForQuery[i] = tail
+		}
+
+		row := db.db.QueryRowContext(ctx, fmt.Sprintf(query, tailQuery), tailsForQuery...)
+		err := row.Scan(&revoked)
+		if err != nil {
+			return nil, err
+		}
+
+		return revoked, nil
+	})
+	if err != nil {
+		return false, errs.Wrap(err)
+	}
+
+	revoked, ok := val.(bool)
+	if !ok {
+		return false, errs.New("Revoked not a bool")
+	}
+
+	return revoked, nil
+}
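The loop in Check builds a "$1,$2,..." placeholder list so that all tails go into a single EXISTS ... IN (...) query, and the result is cached in the LRU under the macaroon's final tail. A self-contained sketch of just that query construction (illustrative; the real code is above):

package main

import (
	"fmt"
	"strings"
)

// buildCheckQuery reproduces the string the Check method above ends up
// executing, for a given number of tails.
func buildCheckQuery(numTails int) string {
	placeholders := make([]string, numTails)
	for i := range placeholders {
		placeholders[i] = fmt.Sprintf("$%d", i+1)
	}
	return fmt.Sprintf(
		"select exists(select 1 from revocations where revoked in (%s))",
		strings.Join(placeholders, ","),
	)
}

func main() {
	fmt.Println(buildCheckQuery(3))
	// select exists(select 1 from revocations where revoked in ($1,$2,$3))
}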
satellite/satellitedb/revocation_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
+// Copyright (C) 2020 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package satellitedb_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"storj.io/common/macaroon"
+	"storj.io/common/testcontext"
+	"storj.io/storj/satellite"
+	"storj.io/storj/satellite/satellitedb/satellitedbtest"
+)
+
+func TestRevocation(t *testing.T) {
+	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
+		secret, err := macaroon.NewSecret()
+		require.NoError(t, err)
+
+		// mac: original macaroon
+		mac, err := macaroon.NewUnrestricted(secret)
+		require.NoError(t, err)
+
+		// mac1 based on mac
+		mac1, err := mac.AddFirstPartyCaveat([]byte("this is a very serious caveat, you'd better not violate it"))
+		require.NoError(t, err)
+
+		// mac2 based on mac
+		mac2, err := mac.AddFirstPartyCaveat([]byte("don't mess with this caveat"))
+		require.NoError(t, err)
+
+		// mac1a based on mac1
+		mac1a, err := mac1.AddFirstPartyCaveat([]byte("now you can't do anything"))
+		require.NoError(t, err)
+
+		revocation := db.Revocation()
+
+		// Check all macaroons as sanity check, they work before revocation
+		for _, mac := range []*macaroon.Macaroon{mac, mac1, mac2, mac1a} {
+			revoked, err := revocation.Check(ctx, mac.Tails(secret))
+			require.NoError(t, err)
+			assert.False(t, revoked)
+		}
+
+		apiKeyID := []byte("api1")
+
+		// Now revoke mac1, which should also revoke mac1a but not affect mac or mac2
+		require.NoError(t, revocation.Revoke(ctx, mac1.Tail(), apiKeyID))
+		// Also revoke some random bytes, so the db has more than 1 entry
+		require.NoError(t, revocation.Revoke(ctx, []byte("random tail"), apiKeyID))
+		require.NoError(t, revocation.Revoke(ctx, []byte("random tail2"), apiKeyID))
+
+		// Verify mac1 and mac1a got revoked
+		for _, mac := range []*macaroon.Macaroon{mac1, mac1a} {
+			revoked, err := revocation.Check(ctx, mac.Tails(secret))
+			require.NoError(t, err)
+			assert.True(t, revoked)
+		}
+
+		// Verify mac and mac2 are not revoked
+		for _, mac := range []*macaroon.Macaroon{mac, mac2} {
+			revoked, err := revocation.Check(ctx, mac.Tails(secret))
+			require.NoError(t, err)
+			assert.False(t, revoked)
+		}
+	})
+}
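The test relies on the fact that revoking mac1's tail also catches mac1a, because a derived macaroon's Tails() list includes the tails of all of its ancestors. A simplified, self-contained model of that chaining (this is not the exact storj.io/common/macaroon construction, just the idea):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// nextTail chains a caveat onto the previous tail, the way macaroons derive
// each tail from the one before it.
func nextTail(prevTail, caveat []byte) []byte {
	mac := hmac.New(sha256.New, prevTail)
	mac.Write(caveat)
	return mac.Sum(nil)
}

func main() {
	head := []byte("head tail of the original macaroon")
	t1 := nextTail(head, []byte("caveat added for mac1"))
	t1a := nextTail(t1, []byte("caveat added for mac1a"))

	// What a revocation check receives for mac1a: every tail in its chain.
	tailsOfMac1a := [][]byte{head, t1, t1a}

	// Revoking t1 therefore also blocks mac1a, but not the sibling mac2,
	// whose chain never contains t1.
	fmt.Println(len(tailsOfMac1a)) // 3
}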
566
satellite/satellitedb/testdata/postgres.v115.sql
vendored
Normal file
566
satellite/satellitedb/testdata/postgres.v115.sql
vendored
Normal file
@ -0,0 +1,566 @@
|
|||||||
|
-- AUTOGENERATED BY storj.io/dbx
|
||||||
|
-- DO NOT EDIT
|
||||||
|
CREATE TABLE accounting_rollups (
|
||||||
|
id bigserial NOT NULL,
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
start_time timestamp with time zone NOT NULL,
|
||||||
|
put_total bigint NOT NULL,
|
||||||
|
get_total bigint NOT NULL,
|
||||||
|
get_audit_total bigint NOT NULL,
|
||||||
|
get_repair_total bigint NOT NULL,
|
||||||
|
put_repair_total bigint NOT NULL,
|
||||||
|
at_rest_total double precision NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE accounting_timestamps (
|
||||||
|
name text NOT NULL,
|
||||||
|
value timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( name )
|
||||||
|
);
|
||||||
|
CREATE TABLE bucket_bandwidth_rollups (
|
||||||
|
bucket_name bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
interval_start timestamp with time zone NOT NULL,
|
||||||
|
interval_seconds integer NOT NULL,
|
||||||
|
action integer NOT NULL,
|
||||||
|
inline bigint NOT NULL,
|
||||||
|
allocated bigint NOT NULL,
|
||||||
|
settled bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||||
|
);
|
||||||
|
CREATE TABLE bucket_storage_tallies (
|
||||||
|
bucket_name bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
interval_start timestamp with time zone NOT NULL,
|
||||||
|
inline bigint NOT NULL,
|
||||||
|
remote bigint NOT NULL,
|
||||||
|
remote_segments_count integer NOT NULL,
|
||||||
|
inline_segments_count integer NOT NULL,
|
||||||
|
object_count integer NOT NULL,
|
||||||
|
metadata_size bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||||
|
);
|
||||||
|
CREATE TABLE coinpayments_transactions (
|
||||||
|
id text NOT NULL,
|
||||||
|
user_id bytea NOT NULL,
|
||||||
|
address text NOT NULL,
|
||||||
|
amount bytea NOT NULL,
|
||||||
|
received bytea NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
key text NOT NULL,
|
||||||
|
timeout integer NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE consumed_serials (
|
||||||
|
storage_node_id bytea NOT NULL,
|
||||||
|
serial_number bytea NOT NULL,
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( storage_node_id, serial_number )
|
||||||
|
);
|
||||||
|
CREATE TABLE coupons (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
user_id bytea NOT NULL,
|
||||||
|
amount bigint NOT NULL,
|
||||||
|
description text NOT NULL,
|
||||||
|
type integer NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
duration bigint NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE coupon_usages (
|
||||||
|
coupon_id bytea NOT NULL,
|
||||||
|
amount bigint NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
period timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( coupon_id, period )
|
||||||
|
);
|
||||||
|
CREATE TABLE credits (
|
||||||
|
user_id bytea NOT NULL,
|
||||||
|
transaction_id text NOT NULL,
|
||||||
|
amount bigint NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( transaction_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE credits_spendings (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
user_id bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
amount bigint NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
period timestamp with time zone NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE graceful_exit_progress (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
bytes_transferred bigint NOT NULL,
|
||||||
|
pieces_transferred bigint NOT NULL DEFAULT 0,
|
||||||
|
pieces_failed bigint NOT NULL DEFAULT 0,
|
||||||
|
updated_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE graceful_exit_transfer_queue (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
path bytea NOT NULL,
|
||||||
|
piece_num integer NOT NULL,
|
||||||
|
root_piece_id bytea,
|
||||||
|
durability_ratio double precision NOT NULL,
|
||||||
|
queued_at timestamp with time zone NOT NULL,
|
||||||
|
requested_at timestamp with time zone,
|
||||||
|
last_failed_at timestamp with time zone,
|
||||||
|
last_failed_code integer,
|
||||||
|
failed_count integer,
|
||||||
|
finished_at timestamp with time zone,
|
||||||
|
order_limit_send_count integer NOT NULL DEFAULT 0,
|
||||||
|
PRIMARY KEY ( node_id, path, piece_num )
|
||||||
|
);
|
||||||
|
CREATE TABLE injuredsegments (
|
||||||
|
path bytea NOT NULL,
|
||||||
|
data bytea NOT NULL,
|
||||||
|
attempted timestamp with time zone,
|
||||||
|
num_healthy_pieces integer NOT NULL DEFAULT 52,
|
||||||
|
PRIMARY KEY ( path )
|
||||||
|
);
|
||||||
|
CREATE TABLE irreparabledbs (
|
||||||
|
segmentpath bytea NOT NULL,
|
||||||
|
segmentdetail bytea NOT NULL,
|
||||||
|
pieces_lost_count bigint NOT NULL,
|
||||||
|
seg_damaged_unix_sec bigint NOT NULL,
|
||||||
|
repair_attempt_count bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( segmentpath )
|
||||||
|
);
|
||||||
|
CREATE TABLE nodes (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
address text NOT NULL DEFAULT '',
|
||||||
|
last_net text NOT NULL,
|
||||||
|
last_ip_port text,
|
||||||
|
protocol integer NOT NULL DEFAULT 0,
|
||||||
|
type integer NOT NULL DEFAULT 0,
|
||||||
|
email text NOT NULL,
|
||||||
|
wallet text NOT NULL,
|
||||||
|
free_disk bigint NOT NULL DEFAULT -1,
|
||||||
|
piece_count bigint NOT NULL DEFAULT 0,
|
||||||
|
major bigint NOT NULL DEFAULT 0,
|
||||||
|
minor bigint NOT NULL DEFAULT 0,
|
||||||
|
patch bigint NOT NULL DEFAULT 0,
|
||||||
|
hash text NOT NULL DEFAULT '',
|
||||||
|
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
|
||||||
|
release boolean NOT NULL DEFAULT false,
|
||||||
|
latency_90 bigint NOT NULL DEFAULT 0,
|
||||||
|
audit_success_count bigint NOT NULL DEFAULT 0,
|
||||||
|
total_audit_count bigint NOT NULL DEFAULT 0,
|
||||||
|
vetted_at timestamp with time zone,
|
||||||
|
uptime_success_count bigint NOT NULL,
|
||||||
|
total_uptime_count bigint NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||||
|
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||||
|
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||||
|
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||||
|
contained boolean NOT NULL DEFAULT false,
|
||||||
|
disqualified timestamp with time zone,
|
||||||
|
suspended timestamp with time zone,
|
||||||
|
unknown_audit_suspended timestamp with time zone,
|
||||||
|
offline_suspended timestamp with time zone,
|
||||||
|
under_review timestamp with time zone,
|
||||||
|
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||||
|
audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||||
|
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||||
|
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||||
|
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||||
|
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||||
|
exit_initiated_at timestamp with time zone,
|
||||||
|
exit_loop_completed_at timestamp with time zone,
|
||||||
|
exit_finished_at timestamp with time zone,
|
||||||
|
exit_success boolean NOT NULL DEFAULT false,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE nodes_offline_times (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
tracked_at timestamp with time zone NOT NULL,
|
||||||
|
seconds integer NOT NULL,
|
||||||
|
PRIMARY KEY ( node_id, tracked_at )
|
||||||
|
);
|
||||||
|
CREATE TABLE offers (
|
||||||
|
id serial NOT NULL,
|
||||||
|
name text NOT NULL,
|
||||||
|
description text NOT NULL,
|
||||||
|
award_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||||
|
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||||
|
award_credit_duration_days integer,
|
||||||
|
invitee_credit_duration_days integer,
|
||||||
|
redeemable_cap integer,
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
type integer NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE peer_identities (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
leaf_serial_number bytea NOT NULL,
|
||||||
|
chain bytea NOT NULL,
|
||||||
|
updated_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE pending_audits (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
piece_id bytea NOT NULL,
|
||||||
|
stripe_index bigint NOT NULL,
|
||||||
|
share_size bigint NOT NULL,
|
||||||
|
expected_share_hash bytea NOT NULL,
|
||||||
|
reverify_count bigint NOT NULL,
|
||||||
|
path bytea NOT NULL,
|
||||||
|
PRIMARY KEY ( node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE pending_serial_queue (
|
||||||
|
storage_node_id bytea NOT NULL,
|
||||||
|
bucket_id bytea NOT NULL,
|
||||||
|
serial_number bytea NOT NULL,
|
||||||
|
action integer NOT NULL,
|
||||||
|
settled bigint NOT NULL,
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||||
|
);
|
||||||
|
CREATE TABLE projects (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
name text NOT NULL,
|
||||||
|
description text NOT NULL,
|
||||||
|
usage_limit bigint NOT NULL DEFAULT 0,
|
||||||
|
bandwidth_limit bigint NOT NULL DEFAULT 0,
|
||||||
|
rate_limit integer,
|
||||||
|
partner_id bytea,
|
||||||
|
owner_id bytea NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE project_bandwidth_rollups (
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
interval_month date NOT NULL,
|
||||||
|
egress_allocated bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( project_id, interval_month )
|
||||||
|
);
|
||||||
|
CREATE TABLE registration_tokens (
|
||||||
|
secret bytea NOT NULL,
|
||||||
|
owner_id bytea,
|
||||||
|
project_limit integer NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( secret ),
|
||||||
|
UNIQUE ( owner_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE reported_serials (
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
storage_node_id bytea NOT NULL,
|
||||||
|
bucket_id bytea NOT NULL,
|
||||||
|
action integer NOT NULL,
|
||||||
|
serial_number bytea NOT NULL,
|
||||||
|
settled bigint NOT NULL,
|
||||||
|
observed_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||||
|
);
|
||||||
|
CREATE TABLE reset_password_tokens (
|
||||||
|
secret bytea NOT NULL,
|
||||||
|
owner_id bytea NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( secret ),
|
||||||
|
UNIQUE ( owner_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE revocations (
|
||||||
|
revoked bytea NOT NULL,
|
||||||
|
api_key_id bytea NOT NULL,
|
||||||
|
PRIMARY KEY ( revoked )
|
||||||
|
);
|
||||||
|
CREATE TABLE serial_numbers (
|
||||||
|
id serial NOT NULL,
|
||||||
|
serial_number bytea NOT NULL,
|
||||||
|
bucket_id bytea NOT NULL,
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE storagenode_bandwidth_rollups (
|
||||||
|
storagenode_id bytea NOT NULL,
|
||||||
|
interval_start timestamp with time zone NOT NULL,
|
||||||
|
interval_seconds integer NOT NULL,
|
||||||
|
action integer NOT NULL,
|
||||||
|
allocated bigint DEFAULT 0,
|
||||||
|
settled bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||||
|
);
|
||||||
|
CREATE TABLE storagenode_payments (
|
||||||
|
id bigserial NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
period text NOT NULL,
|
||||||
|
amount bigint NOT NULL,
|
||||||
|
receipt text,
|
||||||
|
notes text,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE storagenode_paystubs (
|
||||||
|
period text NOT NULL,
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
codes text NOT NULL,
|
||||||
|
usage_at_rest double precision NOT NULL,
|
||||||
|
usage_get bigint NOT NULL,
|
||||||
|
usage_put bigint NOT NULL,
|
||||||
|
usage_get_repair bigint NOT NULL,
|
||||||
|
usage_put_repair bigint NOT NULL,
|
||||||
|
usage_get_audit bigint NOT NULL,
|
||||||
|
comp_at_rest bigint NOT NULL,
|
||||||
|
comp_get bigint NOT NULL,
|
||||||
|
comp_put bigint NOT NULL,
|
||||||
|
comp_get_repair bigint NOT NULL,
|
||||||
|
comp_put_repair bigint NOT NULL,
|
||||||
|
comp_get_audit bigint NOT NULL,
|
||||||
|
surge_percent bigint NOT NULL,
|
||||||
|
held bigint NOT NULL,
|
||||||
|
owed bigint NOT NULL,
|
||||||
|
disposed bigint NOT NULL,
|
||||||
|
paid bigint NOT NULL,
|
||||||
|
PRIMARY KEY ( period, node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE storagenode_storage_tallies (
|
||||||
|
node_id bytea NOT NULL,
|
||||||
|
interval_end_time timestamp with time zone NOT NULL,
|
||||||
|
data_total double precision NOT NULL,
|
||||||
|
PRIMARY KEY ( interval_end_time, node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE stripe_customers (
|
||||||
|
user_id bytea NOT NULL,
|
||||||
|
customer_id text NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( user_id ),
|
||||||
|
UNIQUE ( customer_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE stripecoinpayments_invoice_project_records (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
storage double precision NOT NULL,
|
||||||
|
egress bigint NOT NULL,
|
||||||
|
objects bigint NOT NULL,
|
||||||
|
period_start timestamp with time zone NOT NULL,
|
||||||
|
period_end timestamp with time zone NOT NULL,
|
||||||
|
state integer NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id ),
|
||||||
|
UNIQUE ( project_id, period_start, period_end )
|
||||||
|
);
|
||||||
|
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
||||||
|
tx_id text NOT NULL,
|
||||||
|
rate bytea NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( tx_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE users (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
email text NOT NULL,
|
||||||
|
normalized_email text NOT NULL,
|
||||||
|
full_name text NOT NULL,
|
||||||
|
short_name text,
|
||||||
|
password_hash bytea NOT NULL,
|
||||||
|
status integer NOT NULL,
|
||||||
|
partner_id bytea,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id )
|
||||||
|
);
|
||||||
|
CREATE TABLE value_attributions (
|
||||||
|
project_id bytea NOT NULL,
|
||||||
|
bucket_name bytea NOT NULL,
|
||||||
|
partner_id bytea NOT NULL,
|
||||||
|
last_updated timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( project_id, bucket_name )
|
||||||
|
);
|
||||||
|
CREATE TABLE api_keys (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||||
|
head bytea NOT NULL,
|
||||||
|
name text NOT NULL,
|
||||||
|
secret bytea NOT NULL,
|
||||||
|
partner_id bytea,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id ),
|
||||||
|
UNIQUE ( head ),
|
||||||
|
UNIQUE ( name, project_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE bucket_metainfos (
|
||||||
|
id bytea NOT NULL,
|
||||||
|
project_id bytea NOT NULL REFERENCES projects( id ),
|
||||||
|
name bytea NOT NULL,
|
||||||
|
partner_id bytea,
|
||||||
|
path_cipher integer NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
default_segment_size integer NOT NULL,
|
||||||
|
default_encryption_cipher_suite integer NOT NULL,
|
||||||
|
default_encryption_block_size integer NOT NULL,
|
||||||
|
default_redundancy_algorithm integer NOT NULL,
|
||||||
|
default_redundancy_share_size integer NOT NULL,
|
||||||
|
default_redundancy_required_shares integer NOT NULL,
|
||||||
|
default_redundancy_repair_shares integer NOT NULL,
|
||||||
|
default_redundancy_optimal_shares integer NOT NULL,
|
||||||
|
default_redundancy_total_shares integer NOT NULL,
|
||||||
|
PRIMARY KEY ( id ),
|
||||||
|
UNIQUE ( name, project_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE project_invoice_stamps (
|
||||||
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||||
|
invoice_id bytea NOT NULL,
|
||||||
|
start_date timestamp with time zone NOT NULL,
|
||||||
|
end_date timestamp with time zone NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( project_id, start_date, end_date ),
|
||||||
|
UNIQUE ( invoice_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE project_members (
|
||||||
|
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||||
|
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( member_id, project_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||||
|
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
||||||
|
state integer NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( tx_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE used_serials (
|
||||||
|
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||||
|
storage_node_id bytea NOT NULL,
|
||||||
|
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||||
|
);
|
||||||
|
CREATE TABLE user_credits (
|
||||||
|
id serial NOT NULL,
|
||||||
|
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||||
|
offer_id integer NOT NULL REFERENCES offers( id ),
|
||||||
|
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
||||||
|
type text NOT NULL,
|
||||||
|
credits_earned_in_cents integer NOT NULL,
|
||||||
|
credits_used_in_cents integer NOT NULL,
|
||||||
|
expires_at timestamp with time zone NOT NULL,
|
||||||
|
created_at timestamp with time zone NOT NULL,
|
||||||
|
PRIMARY KEY ( id ),
|
||||||
|
UNIQUE ( id, offer_id )
|
||||||
|
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
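-- Usage sketch (illustrative only, not taken from the migration): the two
-- injuredsegments indexes above are the kind of indexes that back a repair-queue
-- pick ordered by segment health, for example:
SELECT "path", "num_healthy_pieces"
FROM "injuredsegments"
WHERE "attempted" IS NULL
ORDER BY "num_healthy_pieces" ASC
LIMIT 1;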
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', 0, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "credits" ("user_id", "transaction_id", "amount", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'transactionID', 10, '2019-06-01 08:28:24.267934+00');
INSERT INTO "credits_spendings" ("id", "user_id", "project_id", "amount", "status", "period", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\275|\\342N\\347\\014'::bytea, E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 0, 'epoch', '2019-06-01 09:28:24.267934+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('0', '\x0a0130120100', 52);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 30);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 51);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 40);
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', 0, 0, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
-- NEW DATA --
scripts/testdata/satellite-config.yaml.lock (vendored, 6 additions)
@@ -136,6 +136,12 @@ contact.external-address: ""
 # satellite database api key expiration
 # database-options.api-keys-cache.expiration: 1m0s
 
+# macaroon revocation cache capacity
+# database-options.revocations-cache.capacity: 10000
+
+# macaroon revocation cache expiration
+# database-options.revocations-cache.expiration: 5m0s
+
 # how often to delete expired serial numbers
 # db-cleanup.serials-interval: 4h0m0s
 