commit 6f3d0c4ad5

Merge remote-tracking branch 'origin/main' into multipart-upload

Conflicts:
	go.mod
	go.sum
	satellite/repair/repair_test.go
	satellite/repair/repairer/segments.go

Change-Id: Ie51a56878bee84ad9f2d31135f984881a882e906
@@ -11,6 +11,7 @@ import (
	"io"
	"os"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"

@@ -27,7 +28,7 @@ import (
// generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period.
func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Time, end time.Time, output io.Writer) error {
-	db, err := satellitedb.Open(ctx, zap.L().Named("db"), gracefulExitCfg.Database, satellitedb.Options{ApplicationName: "satellite-gracefulexit"})
+	db, err := satellitedb.Open(ctx, zap.L().Named("db"), reportsGracefulExitCfg.Database, satellitedb.Options{ApplicationName: "satellite-gracefulexit"})
	if err != nil {
		return errs.New("error connecting to master database on satellite: %+v", err)
	}

@@ -149,6 +150,77 @@ func verifyGracefulExitReceipt(ctx context.Context, identity *identity.FullIdent
	return writeVerificationMessage(true, completed.SatelliteId, completed.NodeId, completed.Completed)
}

func cleanupGEOrphanedData(ctx context.Context, before time.Time) (err error) {
	db, err := satellitedb.Open(ctx, zap.L().Named("db"), consistencyGECleanupCfg.Database, satellitedb.Options{ApplicationName: "satellite-gracefulexit"})
	if err != nil {
		return errs.New("error connecting to master database on satellite: %+v", err)
	}
	defer func() {
		err = errs.Combine(err, db.Close())
	}()

	nodesItems, err := db.GracefulExit().CountFinishedTransferQueueItemsByNode(ctx, before)
	if err != nil {
		return err
	}

	if len(nodesItems) == 0 {
		fmt.Printf("There isn't any item left in the DB for nodes exited before %s\n", before.Format("2006-01-02"))
		return nil
	}

	{ // print the nodesItems
		fmt.Println(" Node ID | Num. Items ")
		fmt.Println("----------------------------------------------------------------------------------------")

		var totalItems int64
		for id, n := range nodesItems {
			sid := id.String()
			// 61 is the char positions between the beginning of the line and the next
			// column separator, and 24 the char positions for the second column
			// length. Measuring the length of the first column value (node ID), we
			// calculate how many positions to shift to start printing in the next
			// column and then we tell the Printf to align the value to the right by
			// 24 positions which is where the column ends and where last column char
			// value should end.
			fmt.Printf(fmt.Sprintf(" %%s %%%dd\n", 24+61-len(sid)), sid, n)
			totalItems += n
		}

		fmt.Println("----------------------------------------------------------------------------------------")
		fmt.Printf(" Total | %22d \n\n", totalItems)
	}

	_, err = fmt.Printf("Confirm that you want to delete the above items from the DB? (confirm with 'yes') ")
	if err != nil {
		return err
	}

	var confirm string
	n, err := fmt.Scanln(&confirm)
	if err != nil {
		if n != 0 {
			return err
		}
		// fmt.Scanln cannot handle empty input
		confirm = "n"
	}

	if strings.ToLower(confirm) != "yes" {
		fmt.Println("Aborted, NO ITEMS have been deleted")
		return nil
	}

	total, err := db.GracefulExit().DeleteAllFinishedTransferQueueItems(ctx, before)
	if err != nil {
		fmt.Println("Error, NO ITEMS have been deleted")
		return err
	}

	fmt.Printf("%d number of items have been deleted from\n", total)
	return nil
}

func checkIDs(satelliteID storj.NodeID, providedSNID storj.NodeID, receiptSatelliteID storj.NodeID, receiptSNID storj.NodeID) error {
	if satelliteID != receiptSatelliteID {
		return errs.New("satellite ID (%v) does not match receipt satellite ID (%v).", satelliteID, receiptSatelliteID)
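
The comment block above the fmt.Printf in cleanupGEOrphanedData documents the 61/24 column arithmetic behind the dynamically built format string. As an illustrative aside (not part of the commit), here is a self-contained Go sketch with made-up node IDs showing how subtracting the ID length keeps the count column right-aligned:

// Example only: demonstrates the dynamic-width format string used above.
package main

import "fmt"

func main() {
	// Made-up IDs of different lengths stand in for storj.NodeID strings.
	rows := map[string]int64{
		"1a2b3c":                        42,
		"1a2b3c4d5e6f7g8h9i0jklmnopqrs": 7,
	}

	for sid, n := range rows {
		// Width = 24 + 61 - len(sid): the shorter the first column value,
		// the wider the numeric field, so every count ends at the same column.
		fmt.Printf(fmt.Sprintf(" %%s %%%dd\n", 24+61-len(sid)), sid, n)
	}
}
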
@@ -137,19 +137,19 @@ var (
		Args: cobra.MinimumNArgs(3),
		RunE: cmdValueAttribution,
	}
-	gracefulExitCmd = &cobra.Command{
+	reportsGracefulExitCmd = &cobra.Command{
		Use:   "graceful-exit [start] [end]",
		Short: "Generate a graceful exit report",
		Long:  "Generate a node usage report for a given period to use for payments. Format dates using YYYY-MM-DD. The end date is exclusive.",
		Args:  cobra.MinimumNArgs(2),
-		RunE:  cmdGracefulExit,
+		RunE:  cmdReportsGracefulExit,
	}
-	verifyGracefulExitReceiptCmd = &cobra.Command{
+	reportsVerifyGEReceiptCmd = &cobra.Command{
		Use:   "verify-exit-receipt [storage node ID] [receipt]",
		Short: "Verify a graceful exit receipt",
		Long:  "Verify a graceful exit receipt is valid.",
		Args:  cobra.MinimumNArgs(2),
-		RunE:  cmdVerifyGracefulExitReceipt,
+		RunE:  reportsVerifyGEReceipt,
	}
	compensationCmd = &cobra.Command{
		Use: "compensation",

@@ -220,6 +220,17 @@ var (
		Long: "Ensures that we have a stripe customer for every satellite user.",
		RunE: cmdStripeCustomer,
	}
	consistencyCmd = &cobra.Command{
		Use:   "consistency",
		Short: "Readdress DB consistency issues",
		Long:  "Readdress DB consistency issues and perform data cleanups for improving the DB performance.",
	}
	consistencyGECleanupCmd = &cobra.Command{
		Use:   "ge-cleanup-orphaned-data",
		Short: "Cleanup Graceful Exit orphaned data",
		Long:  "Cleanup Graceful Exit data which is lingering in the transfer queue DB table on nodes which has finished the exit.",
		RunE:  cmdConsistencyGECleanup,
	}

	runCfg   Satellite
	setupCfg Satellite

@@ -248,13 +259,18 @@ var (
		Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
		Output   string `help:"destination of report output" default:""`
	}
-	gracefulExitCfg struct {
+	reportsGracefulExitCfg struct {
		Database  string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
		Output    string `help:"destination of report output" default:""`
		Completed bool   `help:"whether to output (initiated and completed) or (initiated and not completed)" default:"false"`
	}
-	verifyGracefulExitReceiptCfg struct {
+	reportsVerifyGracefulExitReceiptCfg struct {
	}
	consistencyGECleanupCfg struct {
		Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
		Before   string `help:"select only exited nodes before this UTC date formatted like YYYY-MM. Date cannot be newer than the current time (required)"`
	}

	confDir     string
	identityDir string
)
@@ -276,10 +292,11 @@ func init() {
	rootCmd.AddCommand(reportsCmd)
	rootCmd.AddCommand(compensationCmd)
	rootCmd.AddCommand(billingCmd)
	rootCmd.AddCommand(consistencyCmd)
	reportsCmd.AddCommand(nodeUsageCmd)
	reportsCmd.AddCommand(partnerAttributionCmd)
-	reportsCmd.AddCommand(gracefulExitCmd)
-	reportsCmd.AddCommand(verifyGracefulExitReceiptCmd)
+	reportsCmd.AddCommand(reportsGracefulExitCmd)
+	reportsCmd.AddCommand(reportsVerifyGEReceiptCmd)
	compensationCmd.AddCommand(generateInvoicesCmd)
	compensationCmd.AddCommand(recordPeriodCmd)
	compensationCmd.AddCommand(recordOneOffPaymentsCmd)

@@ -289,6 +306,7 @@ func init() {
	billingCmd.AddCommand(createCustomerInvoicesCmd)
	billingCmd.AddCommand(finalizeCustomerInvoicesCmd)
	billingCmd.AddCommand(stripeCustomerCmd)
	consistencyCmd.AddCommand(consistencyGECleanupCmd)
	process.Bind(runCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(runMigrationCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(runAPICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))

@@ -301,8 +319,8 @@ func init() {
	process.Bind(generateInvoicesCmd, &generateInvoicesCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(recordPeriodCmd, &recordPeriodCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(recordOneOffPaymentsCmd, &recordOneOffPaymentsCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
-	process.Bind(gracefulExitCmd, &gracefulExitCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
-	process.Bind(verifyGracefulExitReceiptCmd, &verifyGracefulExitReceiptCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(reportsGracefulExitCmd, &reportsGracefulExitCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(reportsVerifyGEReceiptCmd, &reportsVerifyGracefulExitReceiptCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(partnerAttributionCmd, &partnerAttribtionCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(prepareCustomerInvoiceRecordsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(createCustomerInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))

@@ -310,6 +328,11 @@ func init() {
	process.Bind(createCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(finalizeCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(stripeCustomerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
	process.Bind(consistencyGECleanupCmd, &consistencyGECleanupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))

	if err := consistencyGECleanupCmd.MarkFlagRequired("before"); err != nil {
		panic(err)
	}
}

func cmdRun(cmd *cobra.Command, args []string) (err error) {
@@ -327,10 +350,9 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
	}

	db, err := satellitedb.Open(ctx, log.Named("db"), runCfg.Database, satellitedb.Options{
-		ApplicationName:              "satellite-core",
-		ReportedRollupsReadBatchSize: runCfg.Orders.SettlementBatchSize,
-		SaveRollupBatchSize:          runCfg.Tally.SaveRollupBatchSize,
-		ReadRollupBatchSize:          runCfg.Tally.ReadRollupBatchSize,
+		ApplicationName:     "satellite-core",
+		SaveRollupBatchSize: runCfg.Tally.SaveRollupBatchSize,
+		ReadRollupBatchSize: runCfg.Tally.ReadRollupBatchSize,
	})
	if err != nil {
		return errs.New("Error starting master database on satellite: %+v", err)

@@ -515,7 +537,7 @@ func cmdQDiag(cmd *cobra.Command, args []string) (err error) {
	return w.Flush()
}

-func cmdVerifyGracefulExitReceipt(cmd *cobra.Command, args []string) (err error) {
+func reportsVerifyGEReceipt(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := process.Ctx(cmd)

	identity, err := runCfg.Identity.Load()

@@ -532,7 +554,7 @@ func cmdVerifyGracefulExitReceipt(cmd *cobra.Command, args []string) (err error)
	return verifyGracefulExitReceipt(ctx, identity, nodeID, args[1])
}

-func cmdGracefulExit(cmd *cobra.Command, args []string) (err error) {
+func cmdReportsGracefulExit(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := process.Ctx(cmd)

	start, end, err := reports.ParseRange(args[0], args[1])

@@ -541,12 +563,12 @@ func cmdGracefulExit(cmd *cobra.Command, args []string) (err error) {
	}

	// send output to stdout
-	if gracefulExitCfg.Output == "" {
-		return generateGracefulExitCSV(ctx, gracefulExitCfg.Completed, start, end, os.Stdout)
+	if reportsGracefulExitCfg.Output == "" {
+		return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.Completed, start, end, os.Stdout)
	}

	// send output to file
-	file, err := os.Create(gracefulExitCfg.Output)
+	file, err := os.Create(reportsGracefulExitCfg.Output)
	if err != nil {
		return err
	}

@@ -555,7 +577,7 @@ func cmdGracefulExit(cmd *cobra.Command, args []string) (err error) {
		err = errs.Combine(err, file.Close())
	}()

-	return generateGracefulExitCSV(ctx, gracefulExitCfg.Completed, start, end, file)
+	return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.Completed, start, end, file)
}

func cmdNodeUsage(cmd *cobra.Command, args []string) (err error) {

@@ -731,6 +753,21 @@ func cmdStripeCustomer(cmd *cobra.Command, args []string) (err error) {
	return generateStripeCustomers(ctx)
}

func cmdConsistencyGECleanup(cmd *cobra.Command, args []string) error {
	ctx, _ := process.Ctx(cmd)

	before, err := time.Parse("2006-01-02", consistencyGECleanupCfg.Before)
	if err != nil {
		return errs.New("before flag value isn't of the expected format. %+v", err)
	}

	if before.After(time.Now()) {
		return errs.New("before flag value cannot be newer than the current time.")
	}

	return cleanupGEOrphanedData(ctx, before.UTC())
}

func main() {
	process.ExecCustomDebug(rootCmd)
}
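
cmdConsistencyGECleanup above parses the required before flag with Go's reference layout "2006-01-02" (that is, YYYY-MM-DD) and rejects future dates before calling cleanupGEOrphanedData. A minimal standalone sketch of that validation pattern, with illustrative sample values:

// Example only: mirrors the before-flag validation shown above.
package main

import (
	"errors"
	"fmt"
	"time"
)

// parseBefore accepts a YYYY-MM-DD string and rejects dates in the future.
func parseBefore(value string) (time.Time, error) {
	before, err := time.Parse("2006-01-02", value)
	if err != nil {
		return time.Time{}, fmt.Errorf("before flag value isn't of the expected format: %w", err)
	}
	if before.After(time.Now()) {
		return time.Time{}, errors.New("before flag value cannot be newer than the current time")
	}
	return before.UTC(), nil
}

func main() {
	for _, v := range []string{"2021-01-15", "2999-01-01", "15 Jan 2021"} {
		t, err := parseBefore(v)
		fmt.Println(v, "=>", t, err)
	}
}
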
@@ -5,6 +5,7 @@ package cmd

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"

@@ -15,6 +16,7 @@ import (
	"github.com/spf13/cobra"
	"github.com/zeebo/errs"

	"storj.io/common/macaroon"
	"storj.io/common/pb"
	"storj.io/private/cfgstruct"
	"storj.io/private/process"

@@ -90,7 +92,27 @@ func accessList(cmd *cobra.Command, args []string) (err error) {
	return nil
}

type base64url []byte

func (b base64url) MarshalJSON() ([]byte, error) {
	return []byte(`"` + base64.URLEncoding.EncodeToString(b) + `"`), nil
}

type accessInfo struct {
	SatelliteAddr    string               `json:"satellite_addr,omitempty"`
	EncryptionAccess *pb.EncryptionAccess `json:"encryption_access,omitempty"`
	Macaroon         accessInfoMacaroon   `json:"macaroon"`
}

type accessInfoMacaroon struct {
	Head    base64url         `json:"head"`
	Caveats []macaroon.Caveat `json:"caveats"`
	Tail    base64url         `json:"tail"`
}

func accessInspect(cmd *cobra.Command, args []string) (err error) {
	// FIXME: This is inefficient. We end up parsing, serializing, parsing
	// again. It can get particularly bad with large access grants.
	access, err := getAccessFromArgZeroOrConfig(inspectCfg, args)
	if err != nil {
		return errs.New("no access specified: %w", err)

@@ -101,26 +123,64 @@ func accessInspect(cmd *cobra.Command, args []string) (err error) {
		return err
	}

-	satAddr, apiKey, ea, err := parseAccess(serializedAccesss)
+	p, err := parseAccessRaw(serializedAccesss)
	if err != nil {
		return err
	}

	fmt.Println("=========== ACCESS INFO ==================================================================")
	fmt.Println("Satellite :", satAddr)
	fmt.Println("API Key :", apiKey)
	fmt.Println("Encryption Access:", ea)
	m, err := macaroon.ParseMacaroon(p.ApiKey)
	if err != nil {
		return err
	}

	ai := accessInfo{
		SatelliteAddr:    p.SatelliteAddr,
		EncryptionAccess: p.EncryptionAccess,
		Macaroon: accessInfoMacaroon{
			Head:    m.Head(),
			Caveats: []macaroon.Caveat{},
			Tail:    m.Tail(),
		},
	}

	for _, cb := range m.Caveats() {
		var c macaroon.Caveat

		err := pb.Unmarshal(cb, &c)
		if err != nil {
			return err
		}

		ai.Macaroon.Caveats = append(ai.Macaroon.Caveats, c)
	}

	bs, err := json.MarshalIndent(ai, "", " ")
	if err != nil {
		return err
	}

	fmt.Println(string(bs))

	return nil
}

-func parseAccess(access string) (sa string, apiKey string, ea string, err error) {
+func parseAccessRaw(access string) (_ *pb.Scope, err error) {
	data, version, err := base58.CheckDecode(access)
	if err != nil || version != 0 {
-		return "", "", "", errs.New("invalid access grant format: %w", err)
+		return nil, errs.New("invalid access grant format: %w", err)
	}

	p := new(pb.Scope)
	if err := pb.Unmarshal(data, p); err != nil {
		return nil, err
	}

	return p, nil
}

func parseAccess(access string) (sa string, apiKey string, ea string, err error) {
	p, err := parseAccessRaw(access)
	if err != nil {
		return "", "", "", err
	}

@@ -144,29 +204,8 @@ func accessRegister(cmd *cobra.Command, args []string) (err error) {
	if err != nil {
		return err
	}
	switch registerCfg.Format {
	case "env": // export / set compatible format
		fmt.Printf("AWS_ACCESS_KEY_ID=%s\n", accessKey)
		fmt.Printf("AWS_SECRET_ACCESS_KEY=%s\n", secretKey)
		// note that AWS_ENDPOINT configuration is not natively utilized by the AWS CLI
		fmt.Printf("AWS_ENDPOINT=%s\n", endpoint)
	case "aws": // aws configuration commands
		profile := ""
		if registerCfg.AWSProfile != "" {
			profile = " --profile " + registerCfg.AWSProfile
			fmt.Printf("aws configure %s\n", profile)
		}
		fmt.Printf("aws configure %s set aws_access_key_id %s\n", profile, accessKey)
		fmt.Printf("aws configure %s set aws_secret_access_key %s\n", profile, secretKey)
		// note that this configuration is not natively utilized by the AWS CLI
		fmt.Printf("aws configure %s set s3.endpoint_url %s\n", profile, endpoint)
	default: // plain text
		fmt.Println("========== CREDENTIALS ===================================================================")
		fmt.Println("Access Key ID: ", accessKey)
		fmt.Println("Secret Key : ", secretKey)
		fmt.Println("Endpoint : ", endpoint)
	}
	return nil

	return DisplayGatewayCredentials(accessKey, secretKey, endpoint, registerCfg.Format, registerCfg.AWSProfile)
}

func getAccessFromArgZeroOrConfig(config AccessConfig, args []string) (access *uplink.Access, err error) {

@@ -183,6 +222,48 @@ func getAccessFromArgZeroOrConfig(config AccessConfig, args []string) (access *u
	return config.GetAccess()
}
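
The accessInspect rewrite above serializes the macaroon head and tail bytes through the new base64url type, so they appear as URL-safe base64 strings in the JSON output. A self-contained sketch of that MarshalJSON pattern, using fake byte values and no storj packages:

// Example only: reproduces the base64url JSON rendering with fake data.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type base64url []byte

// MarshalJSON emits the bytes as a quoted, URL-safe base64 string.
func (b base64url) MarshalJSON() ([]byte, error) {
	return []byte(`"` + base64.URLEncoding.EncodeToString(b) + `"`), nil
}

type macaroonInfo struct {
	Head base64url `json:"head"`
	Tail base64url `json:"tail"`
}

func main() {
	m := macaroonInfo{Head: []byte{0xde, 0xad}, Tail: []byte{0xbe, 0xef}}
	bs, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bs)) // {"head":"3q0=","tail":"vu8="}
}
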
// DisplayGatewayCredentials formats and writes credentials to stdout.
func DisplayGatewayCredentials(accessKey, secretKey, endpoint, format, awsProfile string) (err error) {
	switch format {
	case "env": // export / set compatible format
		// note that AWS_ENDPOINT configuration is not natively utilized by the AWS CLI
		_, err = fmt.Printf("AWS_ACCESS_KEY_ID=%s\n"+
			"AWS_SECRET_ACCESS_KEY=%s\n"+
			"AWS_ENDPOINT=%s\n",
			accessKey, secretKey, endpoint)
		if err != nil {
			return err
		}
	case "aws": // aws configuration commands
		profile := ""
		if awsProfile != "" {
			profile = " --profile " + awsProfile
			_, err = fmt.Printf("aws configure %s\n", profile)
			if err != nil {
				return err
			}
		}
		// note that the endpoint_url configuration is not natively utilized by the AWS CLI
		_, err = fmt.Printf("aws configure %s set aws_access_key_id %s\n"+
			"aws configure %s set aws_secret_access_key %s\n"+
			"aws configure %s set s3.endpoint_url %s\n",
			profile, accessKey, profile, secretKey, profile, endpoint)
		if err != nil {
			return err
		}
	default: // plain text
		_, err = fmt.Printf("========== CREDENTIALS ===================================================================\n"+
			"Access Key ID: %s\n"+
			"Secret Key : %s\n"+
			"Endpoint : %s\n",
			accessKey, secretKey, endpoint)
		if err != nil {
			return err
		}
	}
	return nil
}

// RegisterAccess registers an access grant with a Gateway Authorization Service.
func RegisterAccess(access *uplink.Access, authService string, public bool, timeout time.Duration) (accessKey, secretKey, endpoint string, err error) {
	if authService == "" {

@@ -66,15 +66,20 @@ func shareMain(cmd *cobra.Command, args []string) (err error) {
		return err
	}

	var accessKey string

	if shareCfg.Register || shareCfg.URL || shareCfg.DNS != "" {
		isPublic := (shareCfg.Public || shareCfg.URL || shareCfg.DNS != "")
-		accessKey, _, _, err = RegisterAccess(newAccess, shareCfg.AuthService, isPublic, defaultAccessRegisterTimeout)
+		accessKey, secretKey, endpoint, err := RegisterAccess(newAccess, shareCfg.AuthService, isPublic, defaultAccessRegisterTimeout)
		if err != nil {
			return err
		}
		err = DisplayGatewayCredentials(accessKey, secretKey, endpoint, "", "")
		if err != nil {
			return err
		}
		_, err = fmt.Println("Public Access: ", isPublic)
		if err != nil {
			return err
		}
		fmt.Println("Public Access: ", isPublic)

		if len(shareCfg.AllowedPathPrefix) == 1 && !permission.AllowUpload && !permission.AllowDelete {
			if shareCfg.URL {
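
DisplayGatewayCredentials above centralizes the credential output that accessRegister and shareMain previously formatted inline. A hedged usage sketch, assuming it is called from the same cmd package; the key and endpoint values are placeholders, not real credentials:

// Example only: hypothetical call site in the same package as DisplayGatewayCredentials.
func exampleDisplay() error {
	accessKey := "EXAMPLEACCESSKEY"       // placeholder, not a real key
	secretKey := "EXAMPLESECRETKEY"       // placeholder, not a real key
	endpoint := "https://gateway.example" // placeholder endpoint

	// "env" prints AWS_* environment variables, "aws" prints aws-cli
	// configure commands, and any other format falls back to plain text.
	return DisplayGatewayCredentials(accessKey, secretKey, endpoint, "env", "")
}
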
go.mod

@@ -44,9 +44,10 @@ require (
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
	golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
	golang.org/x/tools v0.0.0-20200923182640-463111b69878 // indirect
	google.golang.org/api v0.20.0 // indirect
	google.golang.org/protobuf v1.25.0 // indirect
-	storj.io/common v0.0.0-20210119231202-8321551aa24d
+	storj.io/common v0.0.0-20210202120805-a5a4cfd90efa
	storj.io/drpc v0.0.16
	storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
	storj.io/private v0.0.0-20210120150301-bd3ac3e989f0
18
go.sum
18
go.sum
@ -580,6 +580,7 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
github.com/zeebo/admission/v2 v2.0.0/go.mod h1:gSeHGelDHW7Vq6UyJo2boeSt/6Dsnqpisv0i4YZSOyM=
|
||||
@ -673,6 +674,8 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -699,8 +702,9 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@ -718,6 +722,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -802,15 +807,18 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200923182640-463111b69878 h1:VUw1+Jf6KJPf82mbTQMia6HCnNMv2BbAipkEZ4KTcqQ=
|
||||
golang.org/x/tools v0.0.0-20200923182640-463111b69878/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
@ -910,8 +918,8 @@ sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3
|
||||
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
|
||||
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
|
||||
storj.io/common v0.0.0-20210113135631-07a5dc68dc1c/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
|
||||
storj.io/common v0.0.0-20210119231202-8321551aa24d h1:lOLCRtsKISuZlK2lBI5O0uBAc44mp/yO3CtUTXNNSUc=
|
||||
storj.io/common v0.0.0-20210119231202-8321551aa24d/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
|
||||
storj.io/common v0.0.0-20210202120805-a5a4cfd90efa h1:MkGCzbHxlmbZNmRxxLNnS4RUxKHhNEDFDsqsLChFnq4=
|
||||
storj.io/common v0.0.0-20210202120805-a5a4cfd90efa/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
|
||||
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
|
||||
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
|
||||
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
|
||||
|
@@ -90,7 +90,6 @@ storj.io/storj/satellite/repair/repairer."download_failed_not_enough_pieces_repa
storj.io/storj/satellite/repair/repairer."healthy_ratio_after_repair" FloatVal
storj.io/storj/satellite/repair/repairer."healthy_ratio_before_repair" FloatVal
storj.io/storj/satellite/repair/repairer."repair_attempts" Meter
storj.io/storj/satellite/repair/repairer."repair_expired" Meter
storj.io/storj/satellite/repair/repairer."repair_failed" Meter
storj.io/storj/satellite/repair/repairer."repair_nodes_unavailable" Meter
storj.io/storj/satellite/repair/repairer."repair_partial" Meter
@ -34,8 +34,8 @@ import (
|
||||
"storj.io/storj/satellite/accounting"
|
||||
"storj.io/storj/satellite/accounting/live"
|
||||
"storj.io/storj/satellite/accounting/projectbwcleanup"
|
||||
"storj.io/storj/satellite/accounting/reportedrollup"
|
||||
"storj.io/storj/satellite/accounting/rollup"
|
||||
"storj.io/storj/satellite/accounting/rolluparchive"
|
||||
"storj.io/storj/satellite/accounting/tally"
|
||||
"storj.io/storj/satellite/admin"
|
||||
"storj.io/storj/satellite/audit"
|
||||
@ -143,8 +143,8 @@ type Satellite struct {
|
||||
Tally *tally.Service
|
||||
Rollup *rollup.Service
|
||||
ProjectUsage *accounting.Service
|
||||
ReportedRollup *reportedrollup.Chore
|
||||
ProjectBWCleanup *projectbwcleanup.Chore
|
||||
RollupArchive *rolluparchive.Chore
|
||||
}
|
||||
|
||||
LiveAccounting struct {
|
||||
@ -504,13 +504,11 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
|
||||
},
|
||||
},
|
||||
Orders: orders.Config{
|
||||
Expiration: 7 * 24 * time.Hour,
|
||||
SettlementBatchSize: 10,
|
||||
FlushBatchSize: 10,
|
||||
FlushInterval: defaultInterval,
|
||||
NodeStatusLogging: true,
|
||||
WindowEndpointRolloutPhase: orders.WindowEndpointRolloutPhase3,
|
||||
EncryptionKeys: *encryptionKeys,
|
||||
Expiration: 7 * 24 * time.Hour,
|
||||
FlushBatchSize: 10,
|
||||
FlushInterval: defaultInterval,
|
||||
NodeStatusLogging: true,
|
||||
EncryptionKeys: *encryptionKeys,
|
||||
},
|
||||
Checker: checker.Config{
|
||||
Interval: defaultInterval,
|
||||
@ -570,8 +568,11 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
|
||||
Interval: defaultInterval,
|
||||
DeleteTallies: false,
|
||||
},
|
||||
ReportedRollup: reportedrollup.Config{
|
||||
Interval: defaultInterval,
|
||||
RollupArchive: rolluparchive.Config{
|
||||
Interval: defaultInterval,
|
||||
ArchiveAge: time.Hour * 24,
|
||||
BatchSize: 1000,
|
||||
Enabled: true,
|
||||
},
|
||||
ProjectBWCleanup: projectbwcleanup.Config{
|
||||
Interval: defaultInterval,
|
||||
@ -745,8 +746,8 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
|
||||
system.Accounting.Tally = peer.Accounting.Tally
|
||||
system.Accounting.Rollup = peer.Accounting.Rollup
|
||||
system.Accounting.ProjectUsage = api.Accounting.ProjectUsage
|
||||
system.Accounting.ReportedRollup = peer.Accounting.ReportedRollupChore
|
||||
system.Accounting.ProjectBWCleanup = peer.Accounting.ProjectBWCleanupChore
|
||||
system.Accounting.RollupArchive = peer.Accounting.RollupArchiveChore
|
||||
|
||||
system.LiveAccounting = peer.LiveAccounting
|
||||
|
||||
|
@ -368,6 +368,7 @@ func TestBilling_DownloadTraffic(t *testing.T) {
|
||||
require.NotZero(t, usage.Egress, "billed usage")
|
||||
})
|
||||
}
|
||||
|
||||
func TestBilling_ExpiredFiles(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
||||
@ -419,7 +420,6 @@ func getTallies(ctx context.Context, t *testing.T, planet *testplanet.Planet, sa
|
||||
tallies, err := sat.DB.ProjectAccounting().GetTallies(ctx)
|
||||
require.NoError(t, err)
|
||||
return tallies
|
||||
|
||||
}
|
||||
|
||||
func TestBilling_ZombieSegments(t *testing.T) {
|
||||
@ -507,11 +507,6 @@ func getProjectTotalFromStorageNodes(
|
||||
}
|
||||
|
||||
sat := planet.Satellites[satelliteIdx]
|
||||
{
|
||||
rollout := sat.Core.Accounting.ReportedRollupChore
|
||||
require.NoError(t, rollout.RunOnce(ctx, since))
|
||||
}
|
||||
|
||||
sat.Accounting.Tally.Loop.TriggerWait()
|
||||
|
||||
// flush rollups write cache
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/satellite/compensation"
|
||||
"storj.io/storj/satellite/metainfo/metabase"
|
||||
"storj.io/storj/satellite/orders"
|
||||
)
|
||||
|
||||
// RollupStats is a convenience alias.
|
||||
@ -162,6 +163,12 @@ type StoragenodeAccounting interface {
|
||||
QueryStorageNodeUsage(ctx context.Context, nodeID storj.NodeID, start time.Time, end time.Time) ([]StorageNodeUsage, error)
|
||||
// DeleteTalliesBefore deletes all tallies prior to some time
|
||||
DeleteTalliesBefore(ctx context.Context, latestRollup time.Time) error
|
||||
// ArchiveRollupsBefore archives rollups older than a given time and returns num storagenode and bucket bandwidth rollups archived.
|
||||
ArchiveRollupsBefore(ctx context.Context, before time.Time, batchSize int) (numArchivedNodeBW int, err error)
|
||||
// GetRollupsSince retrieves all archived bandwidth rollup records since a given time. A hard limit batch size is used for results.
|
||||
GetRollupsSince(ctx context.Context, since time.Time) ([]StoragenodeBandwidthRollup, error)
|
||||
// GetArchivedRollupsSince retrieves all archived bandwidth rollup records since a given time. A hard limit batch size is used for results.
|
||||
GetArchivedRollupsSince(ctx context.Context, since time.Time) ([]StoragenodeBandwidthRollup, error)
|
||||
}
|
||||
|
||||
// ProjectAccounting stores information about bandwidth and storage usage for projects.
|
||||
@ -199,6 +206,12 @@ type ProjectAccounting interface {
|
||||
GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]BucketUsageRollup, error)
|
||||
// GetBucketTotals returns per bucket usage summary for specified period of time.
|
||||
GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor BucketUsageCursor, since, before time.Time) (*BucketUsagePage, error)
|
||||
// ArchiveRollupsBefore archives rollups older than a given time and returns number of bucket bandwidth rollups archived.
|
||||
ArchiveRollupsBefore(ctx context.Context, before time.Time, batchSize int) (numArchivedBucketBW int, err error)
|
||||
// GetRollupsSince retrieves all archived bandwidth rollup records since a given time. A hard limit batch size is used for results.
|
||||
GetRollupsSince(ctx context.Context, since time.Time) ([]orders.BucketBandwidthRollup, error)
|
||||
// GetArchivedRollupsSince retrieves all archived bandwidth rollup records since a given time. A hard limit batch size is used for results.
|
||||
GetArchivedRollupsSince(ctx context.Context, since time.Time) ([]orders.BucketBandwidthRollup, error)
|
||||
}
|
||||
|
||||
// Cache stores live information about project storage which has not yet been synced to ProjectAccounting.
|
||||
|
@ -116,7 +116,6 @@ func TestProjectUsageBandwidth(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
|
||||
saDB := planet.Satellites[0].DB
|
||||
orderDB := saDB.Orders()
|
||||
|
||||
@ -175,14 +174,14 @@ func TestProjectBandwidthRollups(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
err = db.Orders().UpdateBucketBandwidthAllocation(ctx, p1, b2, pb.PieceAction_GET, 1000, hour)
|
||||
require.NoError(t, err)
|
||||
err = db.Orders().WithTransaction(ctx, func(ctx context.Context, tx orders.Transaction) error {
|
||||
rollups := []orders.BucketBandwidthRollup{
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_GET, Inline: 1000, Allocated: 1000 /* counted */, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_GET, Inline: 1000, Allocated: 1000 /* counted */, Settled: 1000},
|
||||
}
|
||||
return tx.UpdateBucketBandwidthBatch(ctx, hour, rollups)
|
||||
})
|
||||
|
||||
rollups := []orders.BucketBandwidthRollup{
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_GET, Inline: 1000, Allocated: 1000 /* counted */, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_GET, Inline: 1000, Allocated: 1000 /* counted */, Settled: 1000},
|
||||
}
|
||||
err = db.Orders().UpdateBucketBandwidthBatch(ctx, hour, rollups)
|
||||
require.NoError(t, err)
|
||||
|
||||
// things that shouldn't be counted
|
||||
err = db.Orders().UpdateBucketBandwidthAllocation(ctx, p1, b1, pb.PieceAction_PUT, 1000, hour)
|
||||
require.NoError(t, err)
|
||||
@ -208,23 +207,22 @@ func TestProjectBandwidthRollups(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
err = db.Orders().UpdateBucketBandwidthAllocation(ctx, p2, b2, pb.PieceAction_GET_REPAIR, 1000, hour)
|
||||
require.NoError(t, err)
|
||||
err = db.Orders().WithTransaction(ctx, func(ctx context.Context, tx orders.Transaction) error {
|
||||
rollups := []orders.BucketBandwidthRollup{
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_PUT_GRACEFUL_EXIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_PUT_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_GET_AUDIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_GET_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b1), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b2), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b1), Action: pb.PieceAction_PUT_GRACEFUL_EXIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b2), Action: pb.PieceAction_PUT_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b1), Action: pb.PieceAction_GET_AUDIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b2), Action: pb.PieceAction_GET_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
}
|
||||
return tx.UpdateBucketBandwidthBatch(ctx, hour, rollups)
|
||||
})
|
||||
|
||||
rollups = []orders.BucketBandwidthRollup{
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_PUT_GRACEFUL_EXIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_PUT_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b1), Action: pb.PieceAction_GET_AUDIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p1, BucketName: string(b2), Action: pb.PieceAction_GET_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b1), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b2), Action: pb.PieceAction_PUT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b1), Action: pb.PieceAction_PUT_GRACEFUL_EXIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b2), Action: pb.PieceAction_PUT_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b1), Action: pb.PieceAction_GET_AUDIT, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
{ProjectID: p2, BucketName: string(b2), Action: pb.PieceAction_GET_REPAIR, Inline: 1000, Allocated: 1000, Settled: 1000},
|
||||
}
|
||||
err = db.Orders().UpdateBucketBandwidthBatch(ctx, hour, rollups)
|
||||
require.NoError(t, err)
|
||||
|
||||
alloc, err := db.ProjectAccounting().GetProjectAllocatedBandwidth(ctx, p1, now.Year(), now.Month())
|
||||
@ -677,7 +675,6 @@ func TestProjectUsage_BandwidthCache(t *testing.T) {
|
||||
fromCache, err = projectUsage.GetProjectBandwidthUsage(ctx, project.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, badwidthUsed+increment, fromCache)
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -1,253 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package reportedrollup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/spacemonkeygo/monkit/v3"
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/sync2"
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/satellite/metainfo/metabase"
|
||||
"storj.io/storj/satellite/orders"
|
||||
)
|
||||
|
||||
var (
|
||||
mon = monkit.Package()
|
||||
|
||||
// Error is the error class for this package.
|
||||
Error = errs.Class("reportedrollup")
|
||||
)
|
||||
|
||||
// Config is a configuration struct for the Chore.
|
||||
type Config struct {
|
||||
Interval time.Duration `help:"how often to flush the reported serial rollups to the database" default:"5m"`
|
||||
QueueBatchSize int `help:"default queue batch size" default:"10000"`
|
||||
}
|
||||
|
||||
// Chore for flushing reported serials to the database as rollups.
|
||||
//
|
||||
// architecture: Chore
|
||||
type Chore struct {
|
||||
log *zap.Logger
|
||||
db orders.DB
|
||||
config Config
|
||||
|
||||
Loop *sync2.Cycle
|
||||
}
|
||||
|
||||
// NewChore creates new chore for flushing the reported serials to the database as rollups.
|
||||
func NewChore(log *zap.Logger, db orders.DB, config Config) *Chore {
|
||||
if config.QueueBatchSize == 0 {
|
||||
config.QueueBatchSize = 10000
|
||||
}
|
||||
|
||||
return &Chore{
|
||||
log: log,
|
||||
db: db,
|
||||
config: config,
|
||||
|
||||
Loop: sync2.NewCycle(config.Interval),
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the reported rollups chore.
|
||||
func (chore *Chore) Run(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
return chore.Loop.Run(ctx, func(ctx context.Context) error {
|
||||
err := chore.runOnceNow(ctx, time.Now)
|
||||
if err != nil {
|
||||
chore.log.Error("error flushing reported rollups", zap.Error(err))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Close stops the reported rollups chore.
|
||||
func (chore *Chore) Close() error {
|
||||
chore.Loop.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunOnce finds expired bandwidth as of 'now' and inserts rollups into the appropriate tables.
|
||||
func (chore *Chore) RunOnce(ctx context.Context, now time.Time) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
return chore.runOnceNow(ctx, func() time.Time { return now })
|
||||
}
|
||||
|
||||
// runOnceNow runs the helper repeatedly, calling the nowFn each time it runs it. It does that
|
||||
// until the helper returns that it is done or an error occurs.
|
||||
//
|
||||
// This function exists because tests want to use RunOnce and have a single fixed time for
|
||||
// reproducibility, but the chore loop wants to use whatever time.Now is every time the helper
|
||||
// is run.
|
||||
func (chore *Chore) runOnceNow(ctx context.Context, nowFn func() time.Time) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
for {
|
||||
done, err := chore.runOnceHelper(ctx, nowFn())
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if done {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (chore *Chore) readWork(ctx context.Context, queue orders.Queue) (
|
||||
bucketRollups []orders.BucketBandwidthRollup,
|
||||
storagenodeRollups []orders.StoragenodeBandwidthRollup,
|
||||
consumedSerials []orders.ConsumedSerial,
|
||||
done bool, err error,
|
||||
) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
// Variables and types to keep track of bucket bandwidth rollups
|
||||
type bucketKey struct {
|
||||
projectID uuid.UUID
|
||||
bucketName string
|
||||
action pb.PieceAction
|
||||
}
|
||||
byBucket := make(map[bucketKey]uint64)
|
||||
|
||||
// Variables and types to keep track of storagenode bandwidth rollups
|
||||
type storagenodeKey struct {
|
||||
nodeID storj.NodeID
|
||||
action pb.PieceAction
|
||||
}
|
||||
byStoragenode := make(map[storagenodeKey]uint64)
|
||||
|
||||
// Variables to keep track of which serial numbers were consumed
|
||||
type consumedSerialKey struct {
|
||||
nodeID storj.NodeID
|
||||
serialNumber storj.SerialNumber
|
||||
}
|
||||
seenConsumedSerials := make(map[consumedSerialKey]struct{})
|
||||
|
||||
// Get a batch of pending serials from the queue.
|
||||
pendingSerials, queueDone, err := queue.GetPendingSerialsBatch(ctx, chore.config.QueueBatchSize)
|
||||
if err != nil {
|
||||
return nil, nil, nil, false, errs.Wrap(err)
|
||||
}
|
||||
|
||||
for _, row := range pendingSerials {
|
||||
row := row
|
||||
|
||||
// If we have seen this serial inside of this function already, don't
|
||||
// count it again and record it now.
|
||||
key := consumedSerialKey{
|
||||
nodeID: row.NodeID,
|
||||
serialNumber: row.SerialNumber,
|
||||
}
|
||||
if _, exists := seenConsumedSerials[key]; exists {
|
||||
continue
|
||||
}
|
||||
seenConsumedSerials[key] = struct{}{}
|
||||
|
||||
// Parse the node id, project id, and bucket name from the reported serial.
|
||||
bucket, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(row.BucketID)) // TODO: rename row.BucketID -> row.BucketPrefix
|
||||
if err != nil {
|
||||
chore.log.Error("bad row inserted into reported serials",
|
||||
zap.Binary("bucket_id", row.BucketID),
|
||||
zap.String("node_id", row.NodeID.String()),
|
||||
zap.String("serial_number", row.SerialNumber.String()))
|
||||
continue
|
||||
}
|
||||
action := pb.PieceAction(row.Action)
|
||||
settled := row.Settled
|
||||
|
||||
// Update our batch state to include it.
|
||||
byBucket[bucketKey{
|
||||
projectID: bucket.ProjectID,
|
||||
bucketName: bucket.BucketName,
|
||||
action: action,
|
||||
}] += settled
|
||||
|
||||
byStoragenode[storagenodeKey{
|
||||
nodeID: row.NodeID,
|
||||
action: action,
|
||||
}] += settled
|
||||
|
||||
consumedSerials = append(consumedSerials, orders.ConsumedSerial{
|
||||
NodeID: row.NodeID,
|
||||
SerialNumber: row.SerialNumber,
|
||||
ExpiresAt: row.ExpiresAt,
|
||||
})
|
||||
}
|
||||
|
||||
// If we didn't get a full batch, the queue must have run out. We should signal
|
||||
// this fact to our caller so that they can stop looping.
|
||||
if queueDone {
|
||||
done = true
|
||||
}
|
||||
|
||||
// Convert bucket rollups into a slice.
|
||||
for key, settled := range byBucket {
|
||||
bucketRollups = append(bucketRollups, orders.BucketBandwidthRollup{
|
||||
ProjectID: key.projectID,
|
||||
BucketName: key.bucketName,
|
||||
Action: key.action,
|
||||
Settled: int64(settled),
|
||||
})
|
||||
}
|
||||
|
||||
// Convert storagenode rollups into a slice.
|
||||
for key, settled := range byStoragenode {
|
||||
storagenodeRollups = append(storagenodeRollups, orders.StoragenodeBandwidthRollup{
|
||||
NodeID: key.nodeID,
|
||||
Action: key.action,
|
||||
Settled: int64(settled),
|
||||
})
|
||||
}
|
||||
|
||||
chore.log.Debug("Read work",
|
||||
zap.Int("bucket_rollups", len(bucketRollups)),
|
||||
zap.Int("storagenode_rollups", len(storagenodeRollups)),
|
||||
zap.Int("consumed_serials", len(consumedSerials)),
|
||||
zap.Bool("done", done),
|
||||
)
|
||||
|
||||
return bucketRollups, storagenodeRollups, consumedSerials, done, nil
|
||||
}
|
||||
|
||||
func (chore *Chore) runOnceHelper(ctx context.Context, now time.Time) (done bool, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
err = chore.db.WithQueue(ctx, func(ctx context.Context, queue orders.Queue) error {
|
||||
var (
|
||||
bucketRollups []orders.BucketBandwidthRollup
|
||||
storagenodeRollups []orders.StoragenodeBandwidthRollup
|
||||
consumedSerials []orders.ConsumedSerial
|
||||
)
|
||||
|
||||
// Read the work we should insert.
|
||||
bucketRollups, storagenodeRollups, consumedSerials, done, err = chore.readWork(ctx, queue)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
// Now that we have work, write it all in its own transaction.
|
||||
return errs.Wrap(chore.db.WithTransaction(ctx, func(ctx context.Context, tx orders.Transaction) error {
|
||||
if err := tx.UpdateBucketBandwidthBatch(ctx, now, bucketRollups); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := tx.UpdateStoragenodeBandwidthBatchPhase2(ctx, now, storagenodeRollups); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := tx.CreateConsumedSerialsBatch(ctx, consumedSerials); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
})
|
||||
return done, errs.Wrap(err)
|
||||
}
|
92
satellite/accounting/rolluparchive/rolluparchive.go
Normal file
92
satellite/accounting/rolluparchive/rolluparchive.go
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright (C) 2020 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package rolluparchive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/spacemonkeygo/monkit/v3"
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/sync2"
|
||||
"storj.io/storj/satellite/accounting"
|
||||
)
|
||||
|
||||
// Error is a standard error class for this package.
|
||||
var (
|
||||
Error = errs.Class("rolluparchive error")
|
||||
mon = monkit.Package()
|
||||
)
|
||||
|
||||
// Config contains configurable values for rollup archiver.
|
||||
type Config struct {
|
||||
Interval time.Duration `help:"how frequently rollup archiver should run" releaseDefault:"24h" devDefault:"120s"`
|
||||
ArchiveAge time.Duration `help:"age at which a rollup is archived" releaseDefault:"2160h" devDefault:"24h"`
|
||||
BatchSize int `help:"number of records to delete per delete execution. Used only for crdb which is slow without limit." default:"500"`
|
||||
Enabled bool `help:"whether or not the rollup archive is enabled." default:"true"`
|
||||
}
|
||||
|
||||
// Chore archives bucket and storagenode rollups at a given interval.
|
||||
//
|
||||
// architecture: Chore
|
||||
type Chore struct {
|
||||
log *zap.Logger
|
||||
Loop *sync2.Cycle
|
||||
archiveAge time.Duration
|
||||
batchSize int
|
||||
nodeAccounting accounting.StoragenodeAccounting
|
||||
projectAccounting accounting.ProjectAccounting
|
||||
}
|
||||
|
||||
// New creates a new rollup archiver chore.
|
||||
func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.ProjectAccounting, config Config) *Chore {
|
||||
return &Chore{
|
||||
log: log,
|
||||
Loop: sync2.NewCycle(config.Interval),
|
||||
archiveAge: config.ArchiveAge,
|
||||
batchSize: config.BatchSize,
|
||||
nodeAccounting: sdb,
|
||||
projectAccounting: pdb,
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the archiver chore.
|
||||
func (chore *Chore) Run(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if chore.archiveAge < 0 {
|
||||
return Error.New("archive age can't be less than 0")
|
||||
}
|
||||
return chore.Loop.Run(ctx, func(ctx context.Context) error {
|
||||
cutoff := time.Now().UTC().Add(-chore.archiveAge)
|
||||
err := chore.ArchiveRollups(ctx, cutoff, chore.batchSize)
|
||||
if err != nil {
|
||||
chore.log.Error("error archiving SN and bucket bandwidth rollups", zap.Error(err))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Close stops the service and releases any resources.
|
||||
func (chore *Chore) Close() error {
|
||||
chore.Loop.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ArchiveRollups will remove old rollups from active rollup tables.
|
||||
func (chore *Chore) ArchiveRollups(ctx context.Context, cutoff time.Time, batchSize int) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
nodeRollupsArchived, err := chore.nodeAccounting.ArchiveRollupsBefore(ctx, cutoff, batchSize)
|
||||
if err != nil {
|
||||
chore.log.Error("archiving bandwidth rollups", zap.Int("node rollups archived", nodeRollupsArchived), zap.Error(err))
|
||||
return Error.Wrap(err)
|
||||
}
|
||||
bucketRollupsArchived, err := chore.projectAccounting.ArchiveRollupsBefore(ctx, cutoff, batchSize)
|
||||
if err != nil {
|
||||
chore.log.Error("archiving bandwidth rollups", zap.Int("bucket rollups archived", bucketRollupsArchived), zap.Error(err))
|
||||
return Error.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
}
|
99
satellite/accounting/rolluparchive/rolluparchive_test.go
Normal file
99
satellite/accounting/rolluparchive/rolluparchive_test.go
Normal file
@ -0,0 +1,99 @@
|
||||
// Copyright (C) 2020 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package rolluparchive_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/testcontext"
|
||||
"storj.io/common/testrand"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite"
|
||||
)
|
||||
|
||||
func TestRollupArchiveChore(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 0,
|
||||
Reconfigure: testplanet.Reconfigure{
|
||||
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
|
||||
// ensure that orders (and rollups) aren't marked as expired and removed
|
||||
config.Orders.Expiration = time.Hour * 24 * 7
|
||||
},
|
||||
},
|
||||
},
|
||||
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
// The purpose of this test is to ensure that the archive chore deletes
|
||||
// entries in the storagenode_bandwidth_rollups and bucket_bandwidth_rollups tables
|
||||
// and inserts those entries into new archive tables.
|
||||
|
||||
satellite := planet.Satellites[0]
|
||||
satellite.Accounting.Rollup.Loop.Pause()
|
||||
|
||||
days := 6
|
||||
|
||||
currentTime := time.Now().UTC()
|
||||
// Set timestamp back by the number of days we want to save
|
||||
timestamp := currentTime.AddDate(0, 0, -days).Truncate(time.Millisecond)
|
||||
|
||||
projectID := testrand.UUID()
|
||||
|
||||
for i := 0; i < days; i++ {
|
||||
nodeID := testrand.NodeID()
|
||||
var bucketName string
|
||||
bwAmount := int64(1000)
|
||||
|
||||
// When the bucket name and intervalStart are different, a new record is created.
|
||||
bucketName = fmt.Sprintf("%s%d", "testbucket", i)
|
||||
|
||||
err := satellite.DB.Orders().UpdateBucketBandwidthSettle(ctx,
|
||||
projectID, []byte(bucketName), pb.PieceAction_GET, bwAmount, timestamp,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = satellite.DB.Orders().UpdateStoragenodeBandwidthSettle(ctx,
|
||||
nodeID, pb.PieceAction_GET, bwAmount, timestamp)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Advance time by 24 hours
|
||||
timestamp = timestamp.Add(time.Hour * 24)
|
||||
}
|
||||
|
||||
lastWeek := currentTime.AddDate(0, 0, -7).Truncate(time.Millisecond)
|
||||
nodeRollups, err := satellite.DB.StoragenodeAccounting().GetRollupsSince(ctx, lastWeek)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodeRollups, days)
|
||||
|
||||
bucketRollups, err := satellite.DB.ProjectAccounting().GetRollupsSince(ctx, lastWeek)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, bucketRollups, days)
|
||||
|
||||
// We take off a millisecond so the before isn't exactly the same as one of the interval starts.
|
||||
before := currentTime.AddDate(0, 0, -days/2).Add(-time.Millisecond)
|
||||
batchSize := 1000
|
||||
err = satellite.Accounting.RollupArchive.ArchiveRollups(ctx, before, batchSize)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeRollups, err = satellite.DB.StoragenodeAccounting().GetRollupsSince(ctx, lastWeek)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodeRollups, days/2)
|
||||
|
||||
bucketRollups, err = satellite.DB.ProjectAccounting().GetRollupsSince(ctx, lastWeek)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, bucketRollups, days/2)
|
||||
|
||||
nodeRollups, err = satellite.DB.StoragenodeAccounting().GetArchivedRollupsSince(ctx, lastWeek)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodeRollups, days/2)
|
||||
|
||||
bucketRollups, err = satellite.DB.ProjectAccounting().GetArchivedRollupsSince(ctx, lastWeek)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, bucketRollups, days/2)
|
||||
})
|
||||
}
|
@ -173,7 +173,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
|
||||
liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache,
|
||||
config *Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*API, error) {
|
||||
|
||||
peer := &API{
|
||||
Log: log,
|
||||
Identity: full,
|
||||
@ -352,8 +351,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
satelliteSignee,
|
||||
peer.Orders.DB,
|
||||
peer.DB.NodeAPIVersion(),
|
||||
config.Orders.SettlementBatchSize,
|
||||
config.Orders.WindowEndpointRolloutPhase,
|
||||
config.Orders.OrdersSemaphoreSize,
|
||||
peer.Orders.Service,
|
||||
)
|
||||
|
@ -33,6 +33,7 @@ type Paystub struct {
|
||||
Held currency.MicroUnit `csv:"held"`
|
||||
Disposed currency.MicroUnit `csv:"disposed"`
|
||||
Paid currency.MicroUnit `csv:"paid"`
|
||||
Distributed currency.MicroUnit `csv:"distributed"`
|
||||
}
|
||||
|
||||
// LoadPaystubs loads a collection of Paystubs in CSV form from the provided file.
|
||||
|
@ -26,8 +26,8 @@ import (
|
||||
version_checker "storj.io/storj/private/version/checker"
|
||||
"storj.io/storj/satellite/accounting"
|
||||
"storj.io/storj/satellite/accounting/projectbwcleanup"
|
||||
"storj.io/storj/satellite/accounting/reportedrollup"
|
||||
"storj.io/storj/satellite/accounting/rollup"
|
||||
"storj.io/storj/satellite/accounting/rolluparchive"
|
||||
"storj.io/storj/satellite/accounting/tally"
|
||||
"storj.io/storj/satellite/audit"
|
||||
"storj.io/storj/satellite/contact"
|
||||
@ -115,7 +115,7 @@ type Core struct {
|
||||
Accounting struct {
|
||||
Tally *tally.Service
|
||||
Rollup *rollup.Service
|
||||
ReportedRollupChore *reportedrollup.Chore
|
||||
RollupArchiveChore *rolluparchive.Chore
|
||||
ProjectBWCleanupChore *projectbwcleanup.Chore
|
||||
}
|
||||
|
||||
@ -423,15 +423,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
peer.Debug.Server.Panel.Add(
|
||||
debug.Cycle("Accounting Rollup", peer.Accounting.Rollup.Loop))
|
||||
|
||||
peer.Accounting.ReportedRollupChore = reportedrollup.NewChore(peer.Log.Named("accounting:reported-rollup"), peer.DB.Orders(), config.ReportedRollup)
|
||||
peer.Services.Add(lifecycle.Item{
|
||||
Name: "accounting:reported-rollup",
|
||||
Run: peer.Accounting.ReportedRollupChore.Run,
|
||||
Close: peer.Accounting.ReportedRollupChore.Close,
|
||||
})
|
||||
peer.Debug.Server.Panel.Add(
|
||||
debug.Cycle("Accounting Reported Rollup", peer.Accounting.ReportedRollupChore.Loop))
|
||||
|
||||
peer.Accounting.ProjectBWCleanupChore = projectbwcleanup.NewChore(peer.Log.Named("accounting:chore"), peer.DB.ProjectAccounting(), config.ProjectBWCleanup)
|
||||
peer.Services.Add(lifecycle.Item{
|
||||
Name: "accounting:project-bw-rollup",
|
||||
@ -440,6 +431,19 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
})
|
||||
peer.Debug.Server.Panel.Add(
|
||||
debug.Cycle("Accounting Project Bandwidth Rollup", peer.Accounting.ProjectBWCleanupChore.Loop))
|
||||
|
||||
if config.RollupArchive.Enabled {
|
||||
peer.Accounting.RollupArchiveChore = rolluparchive.New(peer.Log.Named("accounting:rollup-archive"), peer.DB.StoragenodeAccounting(), peer.DB.ProjectAccounting(), config.RollupArchive)
|
||||
peer.Services.Add(lifecycle.Item{
|
||||
Name: "accounting:rollup-archive",
|
||||
Run: peer.Accounting.RollupArchiveChore.Run,
|
||||
Close: peer.Accounting.RollupArchiveChore.Close,
|
||||
})
|
||||
peer.Debug.Server.Panel.Add(
|
||||
debug.Cycle("Accounting Rollup Archive", peer.Accounting.RollupArchiveChore.Loop))
|
||||
} else {
|
||||
peer.Log.Named("rolluparchive").Info("disabled")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: remove in future, should be in API
|
||||
@ -474,7 +478,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
pc.CouponProjectLimit,
|
||||
pc.MinCoinPayment,
|
||||
pc.PaywallProportion)
|
||||
|
||||
if err != nil {
|
||||
return nil, errs.Combine(err, peer.Close())
|
||||
}
|
||||
|
@ -55,6 +55,10 @@ type DB interface {
|
||||
DeleteTransferQueueItems(ctx context.Context, nodeID storj.NodeID) error
|
||||
// DeleteFinishedTransferQueueItem deletes finished graceful exit transfer queue entries.
|
||||
DeleteFinishedTransferQueueItems(ctx context.Context, nodeID storj.NodeID) error
|
||||
// DeleteAllFinishedTransferQueueItems deletes all graceful exit transfer
// queue items whose nodes have finished the exit before the indicated time,
// returning the total number of deleted items.
DeleteAllFinishedTransferQueueItems(ctx context.Context, before time.Time) (count int64, err error)
|
||||
// GetTransferQueueItem gets a graceful exit transfer queue entry.
|
||||
GetTransferQueueItem(ctx context.Context, nodeID storj.NodeID, key metabase.SegmentKey, pieceNum int32) (*TransferQueueItem, error)
|
||||
// GetIncomplete gets incomplete graceful exit transfer queue entries ordered by durability ratio and queued date ascending.
|
||||
@ -65,4 +69,8 @@ type DB interface {
|
||||
GetIncompleteFailed(ctx context.Context, nodeID storj.NodeID, maxFailures int, limit int, offset int64) ([]*TransferQueueItem, error)
|
||||
// IncrementOrderLimitSendCount increments the number of times a node has been sent an order limit for transferring.
|
||||
IncrementOrderLimitSendCount(ctx context.Context, nodeID storj.NodeID, key metabase.SegmentKey, pieceNum int32) error
|
||||
// CountFinishedTransferQueueItemsByNode returns a map of the nodes which have
// finished the exit before the indicated time but still have at least one item
// left in the transfer queue.
CountFinishedTransferQueueItemsByNode(ctx context.Context, before time.Time) (map[storj.NodeID]int64, error)
|
||||
}
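
The two additions above are meant to be used together: count what is left per exited node, decide whether to proceed, then delete. A minimal sketch of that sequence follows, assuming (as the method set suggests) that this interface is gracefulexit.DB in the satellite/gracefulexit package; the function name and enclosing package are illustrative only.

package gexitcleanupsketch

import (
    "context"
    "time"

    "storj.io/storj/satellite/gracefulexit"
)

// purgeFinishedQueueItems deletes leftover transfer queue items for nodes that
// finished their exit before the cutoff, returning how many rows were removed.
func purgeFinishedQueueItems(ctx context.Context, db gracefulexit.DB, before time.Time) (int64, error) {
    perNode, err := db.CountFinishedTransferQueueItemsByNode(ctx, before)
    if err != nil {
        return 0, err
    }
    if len(perNode) == 0 {
        return 0, nil // nothing left behind by nodes that exited before the cutoff
    }
    // perNode maps node ID -> leftover item count; a real caller would report it
    // (or ask for confirmation) before deleting.
    return db.DeleteAllFinishedTransferQueueItems(ctx, before)
}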
|
||||
|
@ -1538,6 +1538,12 @@ func (endpoint *Endpoint) GetPendingObjects(ctx context.Context, req *pb.GetPend
|
||||
return nil, rpcstatus.Error(rpcstatus.Unimplemented, "not implemented")
|
||||
}
|
||||
|
||||
// ListPendingObjectStreams lists pending objects according to specific parameters.
func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.ObjectListPendingStreamsRequest) (resp *pb.ObjectListPendingStreamsResponse, err error) {
defer mon.Task()(&ctx)(&err)
return nil, rpcstatus.Error(rpcstatus.Unimplemented, "not implemented")
}
|
||||
|
||||
// DownloadSegment returns data necessary to download segment.
|
||||
func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
@ -30,23 +30,14 @@ import (
|
||||
//
|
||||
// architecture: Database
|
||||
type DB interface {
|
||||
// CreateSerialInfo creates serial number entry in database.
|
||||
CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) error
|
||||
// UseSerialNumber creates a used serial number entry in database from an
|
||||
// existing serial number.
|
||||
// It returns the bucket ID associated to serialNumber.
|
||||
UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) ([]byte, error)
|
||||
// UnuseSerialNumber removes pair serial number -> storage node id from database
|
||||
UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error
|
||||
// GetBucketIDFromSerialNumber returns the bucket ID associated with the serial number
|
||||
GetBucketIDFromSerialNumber(ctx context.Context, serialNumber storj.SerialNumber) ([]byte, error)
|
||||
|
||||
// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket
|
||||
UpdateBucketBandwidthAllocation(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error
|
||||
// UpdateBucketBandwidthSettle updates 'settled' bandwidth for given bucket
|
||||
UpdateBucketBandwidthSettle(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error
|
||||
// UpdateBucketBandwidthInline updates 'inline' bandwidth for given bucket
|
||||
UpdateBucketBandwidthInline(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error
|
||||
// UpdateBucketBandwidthBatch updates all the bandwidth rollups in the database
|
||||
UpdateBucketBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []BucketBandwidthRollup) error
|
||||
|
||||
// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node
|
||||
UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error
|
||||
@ -57,15 +48,6 @@ type DB interface {
|
||||
GetBucketBandwidth(ctx context.Context, projectID uuid.UUID, bucketName []byte, from, to time.Time) (int64, error)
|
||||
// GetStorageNodeBandwidth gets total storage node bandwidth from period of time
|
||||
GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from, to time.Time) (int64, error)
|
||||
|
||||
// ProcessOrders takes a list of order requests and processes them in a batch
|
||||
ProcessOrders(ctx context.Context, requests []*ProcessOrderRequest) (responses []*ProcessOrderResponse, err error)
|
||||
|
||||
// WithTransaction runs the callback and provides it with a Transaction.
|
||||
WithTransaction(ctx context.Context, cb func(ctx context.Context, tx Transaction) error) error
|
||||
// WithQueue runs the callback and provides it with a Queue. When the callback returns with
|
||||
// no error, any pending serials returned by the queue are removed from it.
|
||||
WithQueue(ctx context.Context, cb func(ctx context.Context, queue Queue) error) error
|
||||
}
|
||||
|
||||
// SerialDeleteOptions are option when deleting from serial tables.
|
||||
@ -73,28 +55,6 @@ type SerialDeleteOptions struct {
|
||||
BatchSize int
|
||||
}
|
||||
|
||||
// Transaction represents a database transaction but with higher level actions.
|
||||
type Transaction interface {
|
||||
// UpdateBucketBandwidthBatch updates all the bandwidth rollups in the database
|
||||
UpdateBucketBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []BucketBandwidthRollup) error
|
||||
|
||||
// UpdateStoragenodeBandwidthBatchPhase2 updates all the bandwidth rollups in the database
|
||||
UpdateStoragenodeBandwidthBatchPhase2(ctx context.Context, intervalStart time.Time, rollups []StoragenodeBandwidthRollup) error
|
||||
|
||||
// CreateConsumedSerialsBatch creates the batch of ConsumedSerials.
|
||||
CreateConsumedSerialsBatch(ctx context.Context, consumedSerials []ConsumedSerial) (err error)
|
||||
|
||||
// HasConsumedSerial returns true if the node and serial number have been consumed.
|
||||
HasConsumedSerial(ctx context.Context, nodeID storj.NodeID, serialNumber storj.SerialNumber) (bool, error)
|
||||
}
|
||||
|
||||
// Queue is an abstraction around a queue of pending serials.
|
||||
type Queue interface {
|
||||
// GetPendingSerialsBatch returns a batch of pending serials containing at most size
|
||||
// entries. It returns a boolean indicating true if the queue is empty.
|
||||
GetPendingSerialsBatch(ctx context.Context, size int) ([]PendingSerial, bool, error)
|
||||
}
|
||||
|
||||
// ConsumedSerial is a serial that has been consumed and its bandwidth recorded.
|
||||
type ConsumedSerial struct {
|
||||
NodeID storj.NodeID
|
||||
@ -119,8 +79,6 @@ var (
|
||||
// ErrUsingSerialNumber error class for serial number.
|
||||
ErrUsingSerialNumber = errs.Class("serial number")
|
||||
|
||||
errExpiredOrder = errs.Class("order limit expired")
|
||||
|
||||
mon = monkit.Package()
|
||||
)
|
||||
|
||||
@ -184,30 +142,16 @@ func SortStoragenodeBandwidthRollups(rollups []StoragenodeBandwidthRollup) {
|
||||
})
|
||||
}
|
||||
|
||||
// ProcessOrderRequest for batch order processing.
|
||||
type ProcessOrderRequest struct {
|
||||
Order *pb.Order
|
||||
OrderLimit *pb.OrderLimit
|
||||
}
|
||||
|
||||
// ProcessOrderResponse for batch order processing responses.
|
||||
type ProcessOrderResponse struct {
|
||||
SerialNumber storj.SerialNumber
|
||||
Status pb.SettlementResponse_Status
|
||||
}
|
||||
|
||||
// Endpoint for orders receiving.
|
||||
//
|
||||
// architecture: Endpoint
|
||||
type Endpoint struct {
|
||||
log *zap.Logger
|
||||
satelliteSignee signing.Signee
|
||||
DB DB
|
||||
nodeAPIVersionDB nodeapiversion.DB
|
||||
settlementBatchSize int
|
||||
windowEndpointRolloutPhase WindowEndpointRolloutPhase
|
||||
ordersSemaphore chan struct{}
|
||||
ordersService *Service
|
||||
log *zap.Logger
|
||||
satelliteSignee signing.Signee
|
||||
DB DB
|
||||
nodeAPIVersionDB nodeapiversion.DB
|
||||
ordersSemaphore chan struct{}
|
||||
ordersService *Service
|
||||
}
|
||||
|
||||
// NewEndpoint new orders receiving endpoint.
|
||||
@ -215,209 +159,30 @@ type Endpoint struct {
|
||||
// ordersSemaphoreSize controls the number of concurrent clients allowed to submit orders at once.
|
||||
// A value of zero means unlimited.
|
||||
func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, nodeAPIVersionDB nodeapiversion.DB,
|
||||
settlementBatchSize int, windowEndpointRolloutPhase WindowEndpointRolloutPhase,
|
||||
ordersSemaphoreSize int, ordersService *Service) *Endpoint {
|
||||
|
||||
var ordersSemaphore chan struct{}
|
||||
if ordersSemaphoreSize > 0 {
|
||||
ordersSemaphore = make(chan struct{}, ordersSemaphoreSize)
|
||||
}
|
||||
|
||||
return &Endpoint{
|
||||
log: log,
|
||||
satelliteSignee: satelliteSignee,
|
||||
DB: db,
|
||||
nodeAPIVersionDB: nodeAPIVersionDB,
|
||||
settlementBatchSize: settlementBatchSize,
|
||||
windowEndpointRolloutPhase: windowEndpointRolloutPhase,
|
||||
ordersSemaphore: ordersSemaphore,
|
||||
ordersService: ordersService,
|
||||
}
|
||||
}
|
||||
|
||||
func monitoredSettlementStreamReceive(ctx context.Context, stream pb.DRPCOrders_SettlementStream) (_ *pb.SettlementRequest, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
return stream.Recv()
|
||||
}
|
||||
|
||||
func monitoredSettlementStreamSend(ctx context.Context, stream pb.DRPCOrders_SettlementStream, resp *pb.SettlementResponse) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
switch resp.Status {
|
||||
case pb.SettlementResponse_ACCEPTED:
|
||||
mon.Event("settlement_response_accepted")
|
||||
case pb.SettlementResponse_REJECTED:
|
||||
mon.Event("settlement_response_rejected")
|
||||
default:
|
||||
mon.Event("settlement_response_unknown")
|
||||
}
|
||||
return stream.Send(resp)
|
||||
}
|
||||
|
||||
// withOrdersSemaphore acquires a slot in ordersSemaphore if one exists, runs the
// callback, and releases the slot afterwards. If the context expires while waiting
// for a slot, it returns the context error.
func (endpoint *Endpoint) withOrdersSemaphore(ctx context.Context, cb func(ctx context.Context) error) error {
|
||||
if endpoint.ordersSemaphore == nil {
|
||||
return cb(ctx)
|
||||
}
|
||||
select {
|
||||
case endpoint.ordersSemaphore <- struct{}{}:
|
||||
err := cb(ctx)
|
||||
<-endpoint.ordersSemaphore
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
log: log,
|
||||
satelliteSignee: satelliteSignee,
|
||||
DB: db,
|
||||
nodeAPIVersionDB: nodeAPIVersionDB,
|
||||
ordersSemaphore: ordersSemaphore,
|
||||
ordersService: ordersService,
|
||||
}
|
||||
}
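
The ordersSemaphore used above is the common buffered-channel semaphore in Go: a nil channel means unlimited concurrency; otherwise a caller must place a token before doing work and remove it afterwards, giving up if the context is cancelled while waiting. A standalone sketch of the pattern with generic names (not the satellite's actual types):

package semsketch

import "context"

// withSemaphore runs cb while holding one slot of sem. A nil sem means
// unlimited concurrency; otherwise the call waits for a free slot or for the
// context to be cancelled, whichever happens first.
func withSemaphore(ctx context.Context, sem chan struct{}, cb func(ctx context.Context) error) error {
    if sem == nil {
        return cb(ctx)
    }
    select {
    case sem <- struct{}{}:
        defer func() { <-sem }()
        return cb(ctx)
    case <-ctx.Done():
        return ctx.Err()
    }
}

Creating the channel with make(chan struct{}, n) caps concurrent submissions at n, which is what ordersSemaphoreSize configures; zero leaves the channel nil and therefore unlimited.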
|
||||
|
||||
// Settlement receives orders and handles them in batches.
|
||||
//
|
||||
// Deprecated: an error is always returned to the client.
|
||||
func (endpoint *Endpoint) Settlement(stream pb.DRPCOrders_SettlementStream) (err error) {
|
||||
ctx := stream.Context()
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
switch endpoint.windowEndpointRolloutPhase {
|
||||
case WindowEndpointRolloutPhase1:
|
||||
case WindowEndpointRolloutPhase2, WindowEndpointRolloutPhase3:
|
||||
return rpcstatus.Error(rpcstatus.Unavailable, "endpoint disabled")
|
||||
default:
|
||||
return rpcstatus.Error(rpcstatus.Internal, "invalid window endpoint rollout phase")
|
||||
}
|
||||
|
||||
peer, err := identity.PeerIdentityFromContext(ctx)
|
||||
if err != nil {
|
||||
return rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
formatError := func(err error) error {
|
||||
if errors.Is(err, io.EOF) {
|
||||
return nil
|
||||
}
|
||||
return rpcstatus.Error(rpcstatus.Unknown, err.Error())
|
||||
}
|
||||
|
||||
log := endpoint.log.Named(peer.ID.String())
|
||||
log.Debug("Settlement")
|
||||
|
||||
requests := make([]*ProcessOrderRequest, 0, endpoint.settlementBatchSize)
|
||||
|
||||
defer func() {
|
||||
if len(requests) > 0 {
|
||||
err = errs.Combine(err, endpoint.processOrders(ctx, stream, requests))
|
||||
if err != nil {
|
||||
err = formatError(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
var expirationCount int64
|
||||
defer func() {
|
||||
if expirationCount > 0 {
|
||||
log.Debug("order verification found expired orders", zap.Int64("amount", expirationCount))
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
request, err := monitoredSettlementStreamReceive(ctx, stream)
|
||||
if err != nil {
|
||||
return formatError(err)
|
||||
}
|
||||
|
||||
if request == nil {
|
||||
return rpcstatus.Error(rpcstatus.InvalidArgument, "request missing")
|
||||
}
|
||||
if request.Limit == nil {
|
||||
return rpcstatus.Error(rpcstatus.InvalidArgument, "order limit missing")
|
||||
}
|
||||
if request.Order == nil {
|
||||
return rpcstatus.Error(rpcstatus.InvalidArgument, "order missing")
|
||||
}
|
||||
|
||||
orderLimit := request.Limit
|
||||
order := request.Order
|
||||
|
||||
rejectErr := func() error {
|
||||
if orderLimit.StorageNodeId != peer.ID {
|
||||
return rpcstatus.Error(rpcstatus.Unauthenticated, "only specified storage node can settle order")
|
||||
}
|
||||
|
||||
// check expiration first before the signatures so that we can throw out the large
// number of expired orders being sent to us before doing expensive signature
// verification.
|
||||
if orderLimit.OrderExpiration.Before(time.Now()) {
|
||||
mon.Event("order_verification_failed_expired")
|
||||
expirationCount++
|
||||
return errExpiredOrder.New("order limit expired")
|
||||
}
|
||||
|
||||
// satellite verifies that it signed the order limit
|
||||
if err := signing.VerifyOrderLimitSignature(ctx, endpoint.satelliteSignee, orderLimit); err != nil {
|
||||
mon.Event("order_verification_failed_satellite_signature")
|
||||
return Error.New("unable to verify order limit")
|
||||
}
|
||||
|
||||
// satellite verifies that the order signature matches pub key in order limit
|
||||
if err := signing.VerifyUplinkOrderSignature(ctx, orderLimit.UplinkPublicKey, order); err != nil {
|
||||
mon.Event("order_verification_failed_uplink_signature")
|
||||
return Error.New("unable to verify order")
|
||||
}
|
||||
|
||||
// TODO should this reject or just error ??
|
||||
if orderLimit.SerialNumber != order.SerialNumber {
|
||||
mon.Event("order_verification_failed_serial_mismatch")
|
||||
return Error.New("invalid serial number")
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if rejectErr != nil {
|
||||
mon.Event("order_verification_failed")
|
||||
if !errExpiredOrder.Has(rejectErr) {
|
||||
log.Debug("order limit/order verification failed", zap.Stringer("serial", orderLimit.SerialNumber), zap.Error(rejectErr))
|
||||
}
|
||||
err := monitoredSettlementStreamSend(ctx, stream, &pb.SettlementResponse{
|
||||
SerialNumber: orderLimit.SerialNumber,
|
||||
Status: pb.SettlementResponse_REJECTED,
|
||||
})
|
||||
if err != nil {
|
||||
return formatError(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
requests = append(requests, &ProcessOrderRequest{Order: order, OrderLimit: orderLimit})
|
||||
|
||||
if len(requests) >= endpoint.settlementBatchSize {
|
||||
err = endpoint.processOrders(ctx, stream, requests)
|
||||
requests = requests[:0]
|
||||
if err != nil {
|
||||
return formatError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (endpoint *Endpoint) processOrders(ctx context.Context, stream pb.DRPCOrders_SettlementStream, requests []*ProcessOrderRequest) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var responses []*ProcessOrderResponse
|
||||
err = endpoint.withOrdersSemaphore(ctx, func(ctx context.Context) error {
|
||||
responses, err = endpoint.DB.ProcessOrders(ctx, requests)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, response := range responses {
|
||||
r := &pb.SettlementResponse{
|
||||
SerialNumber: response.SerialNumber,
|
||||
Status: response.Status,
|
||||
}
|
||||
err = monitoredSettlementStreamSend(ctx, stream, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return rpcstatus.Error(rpcstatus.Unavailable, "deprecated endpoint")
|
||||
}
|
||||
|
||||
type bucketIDAction struct {
|
||||
@ -430,134 +195,7 @@ type bucketIDAction struct {
|
||||
// Only one window is processed at a time.
|
||||
// Batches are atomic, all orders are settled successfully or they all fail.
|
||||
func (endpoint *Endpoint) SettlementWithWindow(stream pb.DRPCOrders_SettlementWithWindowStream) (err error) {
|
||||
switch endpoint.windowEndpointRolloutPhase {
|
||||
case WindowEndpointRolloutPhase1, WindowEndpointRolloutPhase2:
|
||||
return endpoint.SettlementWithWindowMigration(stream)
|
||||
case WindowEndpointRolloutPhase3:
|
||||
return endpoint.SettlementWithWindowFinal(stream)
|
||||
default:
|
||||
return rpcstatus.Error(rpcstatus.Internal, "invalid window endpoint rollout phase")
|
||||
}
|
||||
}
|
||||
|
||||
// SettlementWithWindowMigration implements phase 1 and phase 2 of the windowed
// order rollout. It uses the same backend as the non-windowed settlement and
// inserts entries containing 0 for the window, which ensures that any window is
// either entirely handled by the queue or entirely handled by the phase 3 endpoint.
|
||||
func (endpoint *Endpoint) SettlementWithWindowMigration(stream pb.DRPCOrders_SettlementWithWindowStream) (err error) {
|
||||
ctx := stream.Context()
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
peer, err := identity.PeerIdentityFromContext(ctx)
|
||||
if err != nil {
|
||||
endpoint.log.Debug("err peer identity from context", zap.Error(err))
|
||||
return rpcstatus.Error(rpcstatus.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
// update the node api version inside of the semaphore
|
||||
err = endpoint.withOrdersSemaphore(ctx, func(ctx context.Context) error {
|
||||
return endpoint.nodeAPIVersionDB.UpdateVersionAtLeast(ctx, peer.ID, nodeapiversion.HasWindowedOrders)
|
||||
})
|
||||
if err != nil {
|
||||
return rpcstatus.Wrap(rpcstatus.Internal, err)
|
||||
}
|
||||
|
||||
log := endpoint.log.Named(peer.ID.String())
|
||||
log.Debug("SettlementWithWindow")
|
||||
|
||||
var receivedCount int
|
||||
var window int64
|
||||
actions := map[pb.PieceAction]struct{}{}
|
||||
var requests []*ProcessOrderRequest
|
||||
var finished bool
|
||||
|
||||
for !finished {
|
||||
requests = requests[:0]
|
||||
|
||||
for len(requests) < endpoint.settlementBatchSize {
|
||||
request, err := stream.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
finished = true
|
||||
break
|
||||
}
|
||||
log.Debug("err streaming order request", zap.Error(err))
|
||||
return rpcstatus.Error(rpcstatus.Unknown, err.Error())
|
||||
}
|
||||
receivedCount++
|
||||
|
||||
orderLimit := request.Limit
|
||||
if orderLimit == nil {
|
||||
log.Debug("request.OrderLimit is nil")
|
||||
continue
|
||||
}
|
||||
|
||||
order := request.Order
|
||||
if order == nil {
|
||||
log.Debug("request.Order is nil")
|
||||
continue
|
||||
}
|
||||
|
||||
if window == 0 {
|
||||
window = date.TruncateToHourInNano(orderLimit.OrderCreation)
|
||||
}
|
||||
|
||||
// don't process orders that aren't valid
|
||||
if !endpoint.isValid(ctx, log, order, orderLimit, peer.ID, window) {
|
||||
continue
|
||||
}
|
||||
|
||||
actions[orderLimit.Action] = struct{}{}
|
||||
|
||||
requests = append(requests, &ProcessOrderRequest{
|
||||
Order: order,
|
||||
OrderLimit: orderLimit,
|
||||
})
|
||||
}
|
||||
|
||||
// process all of the orders in the old way inside of the semaphore
|
||||
err := endpoint.withOrdersSemaphore(ctx, func(ctx context.Context) error {
|
||||
_, err = endpoint.DB.ProcessOrders(ctx, requests)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return rpcstatus.Wrap(rpcstatus.Internal, err)
|
||||
}
|
||||
}
|
||||
|
||||
// if we received no valid orders, then respond with rejected
|
||||
if len(actions) == 0 || window == 0 {
|
||||
return stream.SendAndClose(&pb.SettlementWithWindowResponse{
|
||||
Status: pb.SettlementWithWindowResponse_REJECTED,
|
||||
})
|
||||
}
|
||||
|
||||
// insert zero rows for every action involved in the set of orders. this prevents
// many problems (double spends and underspends) by ensuring that any window is
// either handled entirely by the queue or entirely by the phase 3 windowed endpoint.
// enter the semaphore for the duration of the updates.
|
||||
|
||||
windowTime := time.Unix(0, window)
|
||||
err = endpoint.withOrdersSemaphore(ctx, func(ctx context.Context) error {
|
||||
for action := range actions {
|
||||
if err := endpoint.DB.UpdateStoragenodeBandwidthSettle(ctx, peer.ID, action, 0, windowTime); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return rpcstatus.Wrap(rpcstatus.Internal, err)
|
||||
}
|
||||
|
||||
log.Debug("orders processed",
|
||||
zap.Int("total orders received", receivedCount),
|
||||
zap.Time("window", windowTime),
|
||||
)
|
||||
|
||||
return stream.SendAndClose(&pb.SettlementWithWindowResponse{
|
||||
Status: pb.SettlementWithWindowResponse_ACCEPTED,
|
||||
})
|
||||
return endpoint.SettlementWithWindowFinal(stream)
|
||||
}
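
The window value threaded through this function is just the order-creation time truncated to the hour, stored as Unix nanoseconds, so every order created within the same hour maps to the same window. A standard-library sketch of that derivation (the satellite uses its own date helper for this; windowFor is a hypothetical name):

package windowsketch

import "time"

// windowFor returns the hour window, in Unix nanoseconds, that an order
// created at t belongs to. All orders whose creation time falls inside the
// same hour share one window value.
func windowFor(t time.Time) int64 {
    return t.UTC().Truncate(time.Hour).UnixNano()
}

// The reverse mapping used when storing rollups:
// windowTime := time.Unix(0, window) // start of the hour window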
|
||||
|
||||
func trackFinalStatus(status pb.SettlementWithWindowResponse_Status) {
|
||||
@ -733,7 +371,6 @@ func (endpoint *Endpoint) SettlementWithWindowFinal(stream pb.DRPCOrders_Settlem
|
||||
|
||||
func (endpoint *Endpoint) isValid(ctx context.Context, log *zap.Logger, order *pb.Order,
|
||||
orderLimit *pb.OrderLimit, peerID storj.NodeID, window int64) bool {
|
||||
|
||||
if orderLimit.StorageNodeId != peerID {
|
||||
log.Debug("storage node id mismatch")
|
||||
mon.Event("order_not_valid_storagenodeid")
|
||||
|
@ -8,9 +8,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/rpc/rpcstatus"
|
||||
@ -19,31 +17,10 @@ import (
|
||||
"storj.io/common/testcontext"
|
||||
"storj.io/common/testrand"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite"
|
||||
"storj.io/storj/satellite/internalpb"
|
||||
"storj.io/storj/satellite/metainfo/metabase"
|
||||
"storj.io/storj/satellite/orders"
|
||||
)
|
||||
|
||||
func runTestWithPhases(t *testing.T, fn func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet)) {
|
||||
run := func(phase orders.WindowEndpointRolloutPhase) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
Reconfigure: testplanet.Reconfigure{
|
||||
Satellite: func(_ *zap.Logger, _ int, config *satellite.Config) {
|
||||
config.Orders.WindowEndpointRolloutPhase = phase
|
||||
},
|
||||
},
|
||||
}, fn)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("Phase1", run(orders.WindowEndpointRolloutPhase1))
|
||||
t.Run("Phase2", run(orders.WindowEndpointRolloutPhase2))
|
||||
t.Run("Phase3", run(orders.WindowEndpointRolloutPhase3))
|
||||
}
|
||||
|
||||
func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
@ -60,10 +37,9 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
|
||||
}
|
||||
key := satellite.Config.Orders.EncryptionKeys.Default
|
||||
|
||||
// stop any async flushes because we want to be sure when some values are
|
||||
// stop the async flush because we want to be sure when some values are
|
||||
// written to avoid races
|
||||
satellite.Orders.Chore.Loop.Pause()
|
||||
satellite.Accounting.ReportedRollup.Loop.Pause()
|
||||
|
||||
// confirm storagenode and bucket bandwidth tables start empty
|
||||
snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
|
||||
@ -73,7 +49,7 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), bucketbw)
|
||||
|
||||
var testCases = []struct {
|
||||
testCases := []struct {
|
||||
name string
|
||||
dataAmount int64
|
||||
orderCreation time.Time
|
||||
@ -177,18 +153,12 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
|
||||
resp, err := stream.CloseAndRecv()
|
||||
require.NoError(t, err)
|
||||
|
||||
// the settled amount is only returned during phase3
|
||||
var settled map[int32]int64
|
||||
if satellite.Config.Orders.WindowEndpointRolloutPhase == orders.WindowEndpointRolloutPhase3 {
|
||||
settled = map[int32]int64{int32(pb.PieceAction_PUT): tt.settledAmt}
|
||||
}
|
||||
settled := map[int32]int64{int32(pb.PieceAction_PUT): tt.settledAmt}
|
||||
require.Equal(t, &pb.SettlementWithWindowResponse{
|
||||
Status: pb.SettlementWithWindowResponse_ACCEPTED,
|
||||
ActionSettled: settled,
|
||||
}, resp)
|
||||
|
||||
// trigger and wait for all of the chores necessary to flush the orders
|
||||
assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, tt.orderCreation))
|
||||
satellite.Orders.Chore.Loop.TriggerWait()
|
||||
|
||||
// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
|
||||
@ -203,11 +173,11 @@ func TestSettlementWithWindowEndpointManyOrders(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
|
||||
const dataAmount int64 = 50
|
||||
satellite := planet.Satellites[0]
|
||||
ordersDB := satellite.Orders.DB
|
||||
@ -221,10 +191,9 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
|
||||
}
|
||||
key := satellite.Config.Orders.EncryptionKeys.Default
|
||||
|
||||
// stop any async flushes because we want to be sure when some values are
|
||||
// stop the async flush because we want to be sure when some values are
|
||||
// written to avoid races
|
||||
satellite.Orders.Chore.Loop.Pause()
|
||||
satellite.Accounting.ReportedRollup.Loop.Pause()
|
||||
|
||||
// confirm storagenode and bucket bandwidth tables start empty
|
||||
snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
|
||||
@ -248,7 +217,7 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
|
||||
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
var testCases = []struct {
|
||||
testCases := []struct {
|
||||
name string
|
||||
dataAmount int64
|
||||
expectedStatus pb.SettlementWithWindowResponse_Status
|
||||
@ -304,9 +273,6 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
|
||||
|
||||
expected := new(pb.SettlementWithWindowResponse)
|
||||
switch {
|
||||
case satellite.Config.Orders.WindowEndpointRolloutPhase != orders.WindowEndpointRolloutPhase3:
|
||||
expected.Status = pb.SettlementWithWindowResponse_ACCEPTED
|
||||
expected.ActionSettled = nil
|
||||
case tt.expectedStatus == pb.SettlementWithWindowResponse_ACCEPTED:
|
||||
expected.Status = pb.SettlementWithWindowResponse_ACCEPTED
|
||||
expected.ActionSettled = map[int32]int64{int32(pb.PieceAction_PUT): tt.dataAmount}
|
||||
@ -316,8 +282,7 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
|
||||
}
|
||||
require.Equal(t, expected, resp)
|
||||
|
||||
// flush all the chores
|
||||
assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, now))
|
||||
// flush the chores
|
||||
satellite.Orders.Chore.Loop.TriggerWait()
|
||||
|
||||
// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
|
||||
@ -334,7 +299,9 @@ func TestSettlementWithWindowEndpointSingleOrder(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
runTestWithPhases(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
satellite := planet.Satellites[0]
|
||||
ordersDB := satellite.Orders.DB
|
||||
storagenode := planet.StorageNodes[0]
|
||||
@ -346,10 +313,9 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
BucketName: bucketname,
|
||||
}
|
||||
|
||||
// stop any async flushes because we want to be sure when some values are
|
||||
// stop the async flush because we want to be sure when some values are
|
||||
// written to avoid races
|
||||
satellite.Orders.Chore.Loop.Pause()
|
||||
satellite.Accounting.ReportedRollup.Loop.Pause()
|
||||
|
||||
// confirm storagenode and bucket bandwidth tables start empty
|
||||
snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
|
||||
@ -360,20 +326,13 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 0, bucketbw)
|
||||
|
||||
// create serial number to use in test
|
||||
serialNumber1 := testrand.SerialNumber()
|
||||
err = ordersDB.CreateSerialInfo(ctx, serialNumber1, []byte(bucketLocation.Prefix()), now.AddDate(1, 0, 10))
|
||||
require.NoError(t, err)
|
||||
|
||||
serialNumber2 := testrand.SerialNumber()
|
||||
err = ordersDB.CreateSerialInfo(ctx, serialNumber2, []byte(bucketLocation.Prefix()), now.AddDate(1, 0, 10))
|
||||
require.NoError(t, err)
|
||||
|
||||
piecePublicKey1, piecePrivateKey1, err := storj.NewPieceKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, piecePrivateKey2, err := storj.NewPieceKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
serialNumber1 := testrand.SerialNumber()
|
||||
key := satellite.Config.Orders.EncryptionKeys.Default
|
||||
encrypted, err := key.EncryptMetadata(
|
||||
serialNumber1,
|
||||
@ -406,6 +365,7 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
serialNumber2 := testrand.SerialNumber()
|
||||
order2, err := signing.SignUplinkOrder(ctx, piecePrivateKey1, &pb.Order{
|
||||
SerialNumber: serialNumber2,
|
||||
Amount: int64(50),
|
||||
@ -418,7 +378,7 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var testCases = []struct {
|
||||
testCases := []struct {
|
||||
name string
|
||||
order *pb.Order
|
||||
orderLimit *pb.OrderLimit
|
||||
@ -453,8 +413,7 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
ActionSettled: nil,
|
||||
}, resp)
|
||||
|
||||
// flush all the chores
|
||||
assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, now))
|
||||
// flush the chores
|
||||
satellite.Orders.Chore.Loop.TriggerWait()
|
||||
|
||||
// assert no data was added to satellite storagenode or bucket bandwidth tables
|
||||
@ -471,7 +430,9 @@ func TestSettlementWithWindowEndpointErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSettlementEndpointSingleOrder(t *testing.T) {
|
||||
runTestWithPhases(t, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
const dataAmount int64 = 50
|
||||
satellite := planet.Satellites[0]
|
||||
ordersDB := satellite.Orders.DB
|
||||
@ -483,10 +444,9 @@ func TestSettlementEndpointSingleOrder(t *testing.T) {
|
||||
ProjectID: projectID,
|
||||
BucketName: bucketname,
|
||||
}
|
||||
// stop any async flushes because we want to be sure when some values are
|
||||
// stop the async flush because we want to be sure when some values are
|
||||
// written to avoid races
|
||||
satellite.Orders.Chore.Loop.Pause()
|
||||
satellite.Accounting.ReportedRollup.Loop.Pause()
|
||||
|
||||
// confirm storagenode and bucket bandwidth tables start empty
|
||||
snbw, err := ordersDB.GetStorageNodeBandwidth(ctx, satellite.ID(), time.Time{}, now)
|
||||
@ -497,13 +457,10 @@ func TestSettlementEndpointSingleOrder(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 0, bucketbw)
|
||||
|
||||
// create serial number to use in test
|
||||
serialNumber := testrand.SerialNumber()
|
||||
err = ordersDB.CreateSerialInfo(ctx, serialNumber, []byte(bucketLocation.Prefix()), now.AddDate(1, 0, 10))
|
||||
require.NoError(t, err)
|
||||
|
||||
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
serialNumber := testrand.SerialNumber()
|
||||
key := satellite.Config.Orders.EncryptionKeys.Default
|
||||
encrypted, err := key.EncryptMetadata(
|
||||
serialNumber,
|
||||
@ -546,54 +503,21 @@ func TestSettlementEndpointSingleOrder(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer ctx.Check(stream.Close)
|
||||
|
||||
// storagenode settles an order and orderlimit
|
||||
var resp *pb.SettlementResponse
|
||||
if satellite.Config.Orders.WindowEndpointRolloutPhase == orders.WindowEndpointRolloutPhase1 {
|
||||
err = stream.Send(&pb.SettlementRequest{
|
||||
Limit: orderLimit,
|
||||
Order: order,
|
||||
})
|
||||
// in phase2 and phase3, the endpoint was disabled. depending on how fast the
|
||||
// server sends that error message, we may see an io.EOF on the Send call, or
|
||||
// we may see no error at all. In either case, we have to call stream.Recv to
|
||||
// see the actual error. gRPC semantics are funky.
|
||||
err = stream.Send(&pb.SettlementRequest{
|
||||
Limit: orderLimit,
|
||||
Order: order,
|
||||
})
|
||||
if err != io.EOF {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stream.CloseSend())
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
// in phase2 and phase3, the endpoint is disabled. depending on how fast the
|
||||
// server sends that error message, we may see an io.EOF on the Send call, or
|
||||
// we may see no error at all. In either case, we have to call stream.Recv to
|
||||
// see the actual error. gRPC semantics are funky.
|
||||
err = stream.Send(&pb.SettlementRequest{
|
||||
Limit: orderLimit,
|
||||
Order: order,
|
||||
})
|
||||
if err != io.EOF {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, stream.CloseSend())
|
||||
|
||||
_, err = stream.Recv()
|
||||
require.Error(t, err)
|
||||
require.Equal(t, rpcstatus.Unavailable, rpcstatus.Code(err))
|
||||
return
|
||||
}
|
||||
require.NoError(t, stream.CloseSend())
|
||||
|
||||
require.Equal(t, &pb.SettlementResponse{
|
||||
SerialNumber: serialNumber,
|
||||
Status: pb.SettlementResponse_ACCEPTED,
|
||||
}, resp)
|
||||
|
||||
// flush all the chores
|
||||
assert.NoError(t, satellite.Accounting.ReportedRollup.RunOnce(ctx, now))
|
||||
satellite.Orders.Chore.Loop.TriggerWait()
|
||||
|
||||
// assert all the right stuff is in the satellite storagenode and bucket bandwidth tables
|
||||
snbw, err = ordersDB.GetStorageNodeBandwidth(ctx, storagenode.ID(), time.Time{}, now)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dataAmount, snbw)
|
||||
|
||||
newBbw, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucketname), time.Time{}, now)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dataAmount, newBbw)
|
||||
_, err = stream.Recv()
|
||||
require.Error(t, err)
|
||||
require.Equal(t, rpcstatus.Unavailable, rpcstatus.Code(err))
|
||||
})
|
||||
}
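
The send-then-Recv sequence in this test is the general way to observe a server-side rejection on a client stream: the error returned by Send races with the server's status, so only Recv reliably reports the status code. A generic, hedged sketch of that pattern follows; clientStream is a stand-in for the generated DRPC client stream, not a real type in this codebase.

package streamsketch

import (
    "errors"
    "io"
)

// clientStream is a stand-in for a generated DRPC/gRPC client stream type.
type clientStream interface {
    Send(msg interface{}) error
    Recv() (interface{}, error)
    CloseSend() error
}

// sendAndObserve sends one message and then calls Recv to learn the real
// outcome: Send may return io.EOF (or nil) even when the server has already
// rejected the stream, so only the error from Recv carries the status code.
func sendAndObserve(stream clientStream, msg interface{}) error {
    if err := stream.Send(msg); err != nil && !errors.Is(err, io.EOF) {
        return err
    }
    if err := stream.CloseSend(); err != nil {
        return err
    }
    _, err := stream.Recv()
    return err // in the real client, inspect this with rpcstatus.Code(err)
}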
|
||||
|
@ -4,25 +4,18 @@
|
||||
package orders_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
|
||||
"storj.io/common/memory"
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/testcontext"
|
||||
"storj.io/common/testrand"
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite"
|
||||
"storj.io/storj/satellite/accounting/reportedrollup"
|
||||
"storj.io/storj/satellite/orders"
|
||||
"storj.io/storj/satellite/satellitedb"
|
||||
"storj.io/storj/satellite/satellitedb/dbx"
|
||||
"storj.io/storj/satellite/satellitedb/satellitedbtest"
|
||||
@ -192,9 +185,6 @@ func TestUploadDownloadBandwidth(t *testing.T) {
|
||||
}
|
||||
planet.Satellites[0].Orders.Chore.Loop.TriggerWait()
|
||||
|
||||
reportedRollupChore := planet.Satellites[0].Core.Accounting.ReportedRollupChore
|
||||
require.NoError(t, reportedRollupChore.RunOnce(ctx, now))
|
||||
|
||||
ordersDB := planet.Satellites[0].DB.Orders()
|
||||
|
||||
bucketBandwidth, err := ordersDB.GetBucketBandwidth(ctx, planet.Uplinks[0].Projects[0].ID, []byte(bucketName), beforeRollup, afterRollup)
|
||||
@ -256,10 +246,6 @@ func TestMultiProjectUploadDownloadBandwidth(t *testing.T) {
|
||||
// flush rollups write cache
|
||||
planet.Satellites[0].Orders.Chore.Loop.TriggerWait()
|
||||
|
||||
// Run the chore as if we were far in the future so that the orders are expired.
|
||||
reportedRollupChore := planet.Satellites[0].Core.Accounting.ReportedRollupChore
|
||||
require.NoError(t, reportedRollupChore.RunOnce(ctx, now))
|
||||
|
||||
// Query and ensure that there's no data recorded for the bucket from the other project
|
||||
ordersDB := planet.Satellites[0].DB.Orders()
|
||||
uplink0Project := planet.Uplinks[0].Projects[0].ID
|
||||
@ -281,412 +267,6 @@ func TestMultiProjectUploadDownloadBandwidth(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkOrders(b *testing.B) {
|
||||
ctx := testcontext.New(b)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
var counts []int
|
||||
if testing.Short() {
|
||||
counts = []int{50, 100}
|
||||
} else {
|
||||
counts = []int{50, 100, 250, 500, 1000}
|
||||
}
|
||||
|
||||
for _, c := range counts {
|
||||
c := c
|
||||
satellitedbtest.Bench(b, func(b *testing.B, db satellite.DB) {
|
||||
snID := testrand.NodeID()
|
||||
|
||||
projectID, _ := uuid.New()
|
||||
bucketID := []byte(projectID.String() + "/b")
|
||||
|
||||
b.Run("Benchmark Order Processing:"+strconv.Itoa(c), func(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
for i := 0; i < b.N; i++ {
|
||||
requests := buildBenchmarkData(ctx, b, db, snID, bucketID, c)
|
||||
|
||||
_, err := db.Orders().ProcessOrders(ctx, requests)
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func buildBenchmarkData(ctx context.Context, b *testing.B, db satellite.DB, storageNodeID storj.NodeID, bucketID []byte, orderCount int) (_ []*orders.ProcessOrderRequest) {
|
||||
requests := make([]*orders.ProcessOrderRequest, 0, orderCount)
|
||||
|
||||
for i := 0; i < orderCount; i++ {
|
||||
snUUID, _ := uuid.New()
|
||||
sn, err := storj.SerialNumberFromBytes(snUUID[:])
|
||||
require.NoError(b, err)
|
||||
|
||||
err = db.Orders().CreateSerialInfo(ctx, sn, bucketID, time.Now().Add(time.Hour*24))
|
||||
require.NoError(b, err)
|
||||
|
||||
order := &pb.Order{
|
||||
SerialNumber: sn,
|
||||
Amount: 1,
|
||||
}
|
||||
|
||||
orderLimit := &pb.OrderLimit{
|
||||
SerialNumber: sn,
|
||||
StorageNodeId: storageNodeID,
|
||||
Action: 2,
|
||||
}
|
||||
requests = append(requests, &orders.ProcessOrderRequest{Order: order,
|
||||
OrderLimit: orderLimit})
|
||||
}
|
||||
return requests
|
||||
}
|
||||
|
||||
func TestLargeOrderLimit(t *testing.T) {
|
||||
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
|
||||
ordersDB := db.Orders()
|
||||
chore := reportedrollup.NewChore(zaptest.NewLogger(t), ordersDB, reportedrollup.Config{})
|
||||
serialNum := storj.SerialNumber{1}
|
||||
|
||||
projectID, _ := uuid.New()
|
||||
now := time.Now()
|
||||
beforeRollup := now.Add(-time.Hour)
|
||||
afterRollup := now.Add(time.Hour)
|
||||
|
||||
// setup: create serial number records
|
||||
err := ordersDB.CreateSerialInfo(ctx, serialNum, []byte(projectID.String()+"/b"), now.AddDate(0, 0, 1))
|
||||
require.NoError(t, err)
|
||||
|
||||
var requests []*orders.ProcessOrderRequest
|
||||
|
||||
// process one order with smaller amount than the order limit and confirm we get the correct response
|
||||
{
|
||||
requests = append(requests, &orders.ProcessOrderRequest{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum,
|
||||
Amount: 100,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum,
|
||||
StorageNodeId: storj.NodeID{1},
|
||||
Action: pb.PieceAction_GET,
|
||||
OrderExpiration: now.AddDate(0, 0, 3),
|
||||
Limit: 250,
|
||||
},
|
||||
})
|
||||
actualResponses, err := ordersDB.ProcessOrders(ctx, requests)
|
||||
require.NoError(t, err)
|
||||
expectedResponses := []*orders.ProcessOrderResponse{
|
||||
{
|
||||
SerialNumber: serialNum,
|
||||
Status: pb.SettlementResponse_ACCEPTED,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expectedResponses, actualResponses)
|
||||
|
||||
require.NoError(t, chore.RunOnce(ctx, now))
|
||||
|
||||
// check only the bandwidth we've used is taken into account
|
||||
bucketBandwidth, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte("b"), beforeRollup, afterRollup)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(100), bucketBandwidth)
|
||||
|
||||
storageNodeBandwidth, err := ordersDB.GetStorageNodeBandwidth(ctx, storj.NodeID{1}, beforeRollup, afterRollup)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(100), storageNodeBandwidth)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessOrders(t *testing.T) {
|
||||
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
|
||||
ordersDB := db.Orders()
|
||||
chore := reportedrollup.NewChore(zaptest.NewLogger(t), ordersDB, reportedrollup.Config{})
|
||||
invalidSerial := storj.SerialNumber{1}
|
||||
serialNum := storj.SerialNumber{2}
|
||||
serialNum2 := storj.SerialNumber{3}
|
||||
|
||||
projectID, _ := uuid.New()
|
||||
now := time.Now()
|
||||
beforeRollup := now.Add(-time.Hour - time.Second)
|
||||
afterRollup := now.Add(time.Hour + time.Second)
|
||||
|
||||
// assertion helpers
|
||||
checkBucketBandwidth := func(bucket string, amount int64) {
|
||||
settled, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucket), beforeRollup, afterRollup)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, amount, settled)
|
||||
}
|
||||
checkStoragenodeBandwidth := func(node storj.NodeID, amount int64) {
|
||||
settled, err := ordersDB.GetStorageNodeBandwidth(ctx, node, beforeRollup, afterRollup)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, amount, settled)
|
||||
}
|
||||
|
||||
// setup: create serial number records
|
||||
err := ordersDB.CreateSerialInfo(ctx, serialNum, []byte(projectID.String()+"/b"), now.AddDate(0, 0, 1))
|
||||
require.NoError(t, err)
|
||||
err = ordersDB.CreateSerialInfo(ctx, serialNum2, []byte(projectID.String()+"/c"), now.AddDate(0, 0, 1))
|
||||
require.NoError(t, err)
|
||||
|
||||
var requests []*orders.ProcessOrderRequest
|
||||
|
||||
// process one order and confirm we get the correct response
|
||||
{
|
||||
requests = append(requests, &orders.ProcessOrderRequest{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum,
|
||||
Amount: 100,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum,
|
||||
StorageNodeId: storj.NodeID{1},
|
||||
Action: pb.PieceAction_DELETE,
|
||||
OrderExpiration: now.AddDate(0, 0, 3),
|
||||
},
|
||||
})
|
||||
actualResponses, err := ordersDB.ProcessOrders(ctx, requests)
|
||||
require.NoError(t, err)
|
||||
expectedResponses := []*orders.ProcessOrderResponse{
|
||||
{
|
||||
SerialNumber: serialNum,
|
||||
Status: pb.SettlementResponse_ACCEPTED,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expectedResponses, actualResponses)
|
||||
}
|
||||
|
||||
// process two orders from different storagenodes and confirm there is an error
|
||||
{
|
||||
requests = append(requests, &orders.ProcessOrderRequest{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum2,
|
||||
Amount: 200,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum2,
|
||||
StorageNodeId: storj.NodeID{2},
|
||||
Action: pb.PieceAction_PUT,
|
||||
OrderExpiration: now.AddDate(0, 0, 1)},
|
||||
})
|
||||
_, err = ordersDB.ProcessOrders(ctx, requests)
|
||||
require.Error(t, err, "different storage nodes")
|
||||
}
|
||||
|
||||
// process two orders from same storagenodes and confirm we get two responses
|
||||
{
|
||||
requests[0].OrderLimit.StorageNodeId = storj.NodeID{2}
|
||||
actualResponses, err := ordersDB.ProcessOrders(ctx, requests)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(actualResponses))
|
||||
}
|
||||
|
||||
// confirm the correct data from processing orders was written and consumed
|
||||
{
|
||||
require.NoError(t, chore.RunOnce(ctx, now))
|
||||
|
||||
checkBucketBandwidth("b", 200)
|
||||
checkBucketBandwidth("c", 200)
|
||||
checkStoragenodeBandwidth(storj.NodeID{1}, 100)
|
||||
checkStoragenodeBandwidth(storj.NodeID{2}, 300)
|
||||
}
|
||||
|
||||
// confirm invalid order at index 0 does not result in a SQL error
|
||||
{
|
||||
requests := []*orders.ProcessOrderRequest{
|
||||
{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: invalidSerial,
|
||||
Amount: 200,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: invalidSerial,
|
||||
StorageNodeId: storj.NodeID{1},
|
||||
Action: pb.PieceAction_PUT,
|
||||
OrderExpiration: now.AddDate(0, 0, 1),
|
||||
},
|
||||
},
|
||||
{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum,
|
||||
Amount: 200,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum,
|
||||
StorageNodeId: storj.NodeID{1},
|
||||
Action: pb.PieceAction_PUT,
|
||||
OrderExpiration: now.AddDate(0, 0, 1),
|
||||
},
|
||||
},
|
||||
}
|
||||
responses, err := ordersDB.ProcessOrders(ctx, requests)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, pb.SettlementResponse_REJECTED, responses[0].Status)
|
||||
}
|
||||
|
||||
// in case of conflicting ProcessOrderRequests, what has been recorded already wins
|
||||
{
|
||||
// unique nodeID so the other tests here don't interfere
|
||||
nodeID := testrand.NodeID()
|
||||
requests := []*orders.ProcessOrderRequest{
|
||||
{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum,
|
||||
Amount: 100,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum,
|
||||
StorageNodeId: nodeID,
|
||||
Action: pb.PieceAction_GET,
|
||||
OrderExpiration: now.AddDate(0, 0, 1),
|
||||
},
|
||||
},
|
||||
{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum2,
|
||||
Amount: 200,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum2,
|
||||
StorageNodeId: nodeID,
|
||||
Action: pb.PieceAction_GET,
|
||||
OrderExpiration: now.AddDate(0, 0, 1),
|
||||
},
|
||||
},
|
||||
}
|
||||
responses, err := ordersDB.ProcessOrders(ctx, requests)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, pb.SettlementResponse_ACCEPTED, responses[0].Status)
|
||||
require.Equal(t, pb.SettlementResponse_ACCEPTED, responses[1].Status)
|
||||
|
||||
requests = []*orders.ProcessOrderRequest{
|
||||
{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum,
|
||||
Amount: 1,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum,
|
||||
StorageNodeId: nodeID,
|
||||
Action: pb.PieceAction_GET,
|
||||
OrderExpiration: now.AddDate(0, 0, 1),
|
||||
},
|
||||
},
|
||||
{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum2,
|
||||
Amount: 500,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum2,
|
||||
StorageNodeId: nodeID,
|
||||
Action: pb.PieceAction_GET,
|
||||
OrderExpiration: now.AddDate(0, 0, 1),
|
||||
},
|
||||
},
|
||||
}
|
||||
responses, err = ordersDB.ProcessOrders(ctx, requests)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, pb.SettlementResponse_ACCEPTED, responses[0].Status)
|
||||
require.Equal(t, pb.SettlementResponse_ACCEPTED, responses[1].Status)
|
||||
|
||||
require.NoError(t, chore.RunOnce(ctx, now))
|
||||
|
||||
checkBucketBandwidth("b", 201)
|
||||
checkBucketBandwidth("c", 700)
|
||||
checkStoragenodeBandwidth(storj.NodeID{1}, 100)
|
||||
checkStoragenodeBandwidth(storj.NodeID{2}, 300)
|
||||
checkStoragenodeBandwidth(nodeID, 501)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessOrders_DoubleSend(t *testing.T) {
|
||||
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
|
||||
ordersDB := db.Orders()
|
||||
chore := reportedrollup.NewChore(zaptest.NewLogger(t), ordersDB, reportedrollup.Config{})
|
||||
serialNum := storj.SerialNumber{2}
|
||||
projectID, _ := uuid.New()
|
||||
now := time.Now()
|
||||
beforeRollup := now.Add(-time.Hour - time.Second)
|
||||
afterRollup := now.Add(time.Hour + time.Second)
|
||||
|
||||
// assertion helpers
|
||||
checkBucketBandwidth := func(bucket string, amount int64) {
|
||||
settled, err := ordersDB.GetBucketBandwidth(ctx, projectID, []byte(bucket), beforeRollup, afterRollup)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, amount, settled)
|
||||
}
|
||||
checkStoragenodeBandwidth := func(node storj.NodeID, amount int64) {
|
||||
settled, err := ordersDB.GetStorageNodeBandwidth(ctx, node, beforeRollup, afterRollup)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, amount, settled)
|
||||
}
|
||||
|
||||
// setup: create serial number records
|
||||
err := ordersDB.CreateSerialInfo(ctx, serialNum, []byte(projectID.String()+"/b"), now.AddDate(0, 0, 1))
|
||||
require.NoError(t, err)
|
||||
|
||||
order := &orders.ProcessOrderRequest{
|
||||
Order: &pb.Order{
|
||||
SerialNumber: serialNum,
|
||||
Amount: 100,
|
||||
},
|
||||
OrderLimit: &pb.OrderLimit{
|
||||
SerialNumber: serialNum,
|
||||
StorageNodeId: storj.NodeID{1},
|
||||
Action: pb.PieceAction_PUT,
|
||||
OrderExpiration: now.AddDate(0, 0, 3),
|
||||
},
|
||||
}
|
||||
|
||||
// send the same order twice in the same request
|
||||
{
|
||||
actualResponses, err := ordersDB.ProcessOrders(ctx, []*orders.ProcessOrderRequest{order, order})
|
||||
require.NoError(t, err)
|
||||
expectedResponses := []*orders.ProcessOrderResponse{
|
||||
{
|
||||
SerialNumber: serialNum,
|
||||
Status: pb.SettlementResponse_ACCEPTED,
|
||||
},
|
||||
{
|
||||
SerialNumber: serialNum,
|
||||
Status: pb.SettlementResponse_REJECTED,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expectedResponses, actualResponses)
|
||||
}
|
||||
|
||||
// confirm the correct data from processing orders was written and consumed
|
||||
{
|
||||
require.NoError(t, chore.RunOnce(ctx, now))
|
||||
|
||||
checkBucketBandwidth("b", 100)
|
||||
checkStoragenodeBandwidth(storj.NodeID{1}, 100)
|
||||
}
|
||||
|
||||
// send the already sent and handled order again
|
||||
{
|
||||
actualResponses, err := ordersDB.ProcessOrders(ctx, []*orders.ProcessOrderRequest{order})
|
||||
require.NoError(t, err)
|
||||
expectedResponses := []*orders.ProcessOrderResponse{
|
||||
{
|
||||
SerialNumber: serialNum,
|
||||
Status: pb.SettlementResponse_ACCEPTED,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expectedResponses, actualResponses)
|
||||
}
|
||||
|
||||
// confirm the correct data from processing orders was written and consumed
|
||||
{
|
||||
require.NoError(t, chore.RunOnce(ctx, now))
|
||||
|
||||
checkBucketBandwidth("b", 100)
|
||||
checkStoragenodeBandwidth(storj.NodeID{1}, 100)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateStoragenodeBandwidthSettleWithWindow(t *testing.T) {
|
||||
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
|
||||
ordersDB := db.Orders()
|
||||
@ -711,12 +291,6 @@ func TestUpdateStoragenodeBandwidthSettleWithWindow(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), bucketbw)
|
||||
|
||||
// setup: create serial number record
|
||||
serialNum := testrand.SerialNumber()
|
||||
bucketID := storj.JoinPaths(projectID.String(), bucketname)
|
||||
err = ordersDB.CreateSerialInfo(ctx, serialNum, []byte(bucketID), now.AddDate(0, 0, 10))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test: process an order from a storagenode that has not been processed before
|
||||
status, alreadyProcesed, err := ordersDB.UpdateStoragenodeBandwidthSettleWithWindow(ctx, snID, actionAmounts, windowTime)
|
||||
require.NoError(t, err)
|
||||
@ -757,7 +331,7 @@ func TestUpdateStoragenodeBandwidthSettleWithWindow(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSettledAmountsMatch(t *testing.T) {
|
||||
var testCases = []struct {
|
||||
testCases := []struct {
|
||||
name string
|
||||
rows []*dbx.StoragenodeBandwidthRollup
|
||||
orderActionAmounts map[int32]int64
|
||||
|
@ -124,9 +124,7 @@ func (cache *RollupsWriteCache) flush(ctx context.Context, pendingRollups Rollup
|
||||
})
|
||||
}
|
||||
|
||||
err := cache.DB.WithTransaction(ctx, func(ctx context.Context, tx Transaction) error {
|
||||
return tx.UpdateBucketBandwidthBatch(ctx, latestTime, rollups)
|
||||
})
|
||||
err := cache.DB.UpdateBucketBandwidthBatch(ctx, latestTime, rollups)
|
||||
if err != nil {
|
||||
cache.log.Error("MONEY LOST! Bucket bandwidth rollup batch flush failed.", zap.Error(err))
|
||||
}
|
||||
|
@ -32,15 +32,12 @@ var (
|
||||
|
||||
// Config is a configuration struct for orders Service.
|
||||
type Config struct {
|
||||
EncryptionKeys EncryptionKeys `help:"encryption keys to encrypt info in orders" default:""`
|
||||
Expiration time.Duration `help:"how long until an order expires" default:"48h"` // 2 days
|
||||
SettlementBatchSize int `help:"how many orders to batch per transaction" default:"250"`
|
||||
FlushBatchSize int `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"10000"`
|
||||
FlushInterval time.Duration `help:"how often to flush the rollups write cache to the database" devDefault:"30s" releaseDefault:"1m"`
|
||||
ReportedRollupsReadBatchSize int `help:"how many records to read in a single transaction when calculating billable bandwidth" default:"1000"`
|
||||
NodeStatusLogging bool `hidden:"true" help:"deprecated, log the offline/disqualification status of nodes" default:"false"`
|
||||
WindowEndpointRolloutPhase WindowEndpointRolloutPhase `help:"rollout phase for the windowed endpoint" default:"phase3"`
|
||||
OrdersSemaphoreSize int `help:"how many concurrent orders to process at once. zero is unlimited" default:"2"`
|
||||
EncryptionKeys EncryptionKeys `help:"encryption keys to encrypt info in orders" default:""`
|
||||
Expiration time.Duration `help:"how long until an order expires" default:"48h"` // 2 days
|
||||
FlushBatchSize int `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"10000"`
|
||||
FlushInterval time.Duration `help:"how often to flush the rollups write cache to the database" devDefault:"30s" releaseDefault:"1m"`
|
||||
NodeStatusLogging bool `hidden:"true" help:"deprecated, log the offline/disqualification status of nodes" default:"false"`
|
||||
OrdersSemaphoreSize int `help:"how many concurrent orders to process at once. zero is unlimited" default:"2"`
|
||||
}
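
For context, the help/default/devDefault/releaseDefault annotations on the Config fields above are ordinary Go struct tags that the satellite's config machinery reads to build flags and defaults. The following is only an illustrative, hypothetical sketch of inspecting such tags with the standard reflect package; it is not the project's actual config loader, and exampleConfig is a made-up subset of the struct above.

package main

import (
	"fmt"
	"reflect"
	"time"
)

// exampleConfig is a hypothetical copy of two fields from the Config struct
// above, used only to demonstrate how the tag metadata is carried.
type exampleConfig struct {
	Expiration     time.Duration `help:"how long until an order expires" default:"48h"`
	FlushBatchSize int           `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"10000"`
}

func main() {
	t := reflect.TypeOf(exampleConfig{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Each tag key is plain metadata that a config binder can turn into
		// a flag name, help text, and environment-specific default.
		fmt.Printf("%s: help=%q default=%q devDefault=%q releaseDefault=%q\n",
			f.Name, f.Tag.Get("help"), f.Tag.Get("default"),
			f.Tag.Get("devDefault"), f.Tag.Get("releaseDefault"))
	}
}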
|
||||
|
||||
// BucketsDB returns information about buckets.
|
||||
@ -522,7 +519,7 @@ func (service *Service) DecryptOrderMetadata(ctx context.Context, order *pb.Orde
|
||||
var orderKeyID EncryptionKeyID
|
||||
copy(orderKeyID[:], order.EncryptedMetadataKeyId)
|
||||
|
||||
var key = service.encryptionKeys.Default
|
||||
key := service.encryptionKeys.Default
|
||||
if key.ID != orderKeyID {
|
||||
val, ok := service.encryptionKeys.KeyByID[orderKeyID]
|
||||
if !ok {
|
||||
|
@ -16,8 +16,8 @@ import (
|
||||
"storj.io/storj/satellite/accounting"
|
||||
"storj.io/storj/satellite/accounting/live"
|
||||
"storj.io/storj/satellite/accounting/projectbwcleanup"
|
||||
"storj.io/storj/satellite/accounting/reportedrollup"
|
||||
"storj.io/storj/satellite/accounting/rollup"
|
||||
"storj.io/storj/satellite/accounting/rolluparchive"
|
||||
"storj.io/storj/satellite/accounting/tally"
|
||||
"storj.io/storj/satellite/admin"
|
||||
"storj.io/storj/satellite/attribution"
|
||||
@ -132,8 +132,8 @@ type Config struct {
|
||||
|
||||
Tally tally.Config
|
||||
Rollup rollup.Config
|
||||
RollupArchive rolluparchive.Config
|
||||
LiveAccounting live.Config
|
||||
ReportedRollup reportedrollup.Config
|
||||
ProjectBWCleanup projectbwcleanup.Config
|
||||
|
||||
Mail mailservice.Config
|
||||
|
@ -395,8 +395,8 @@ func testCorruptDataRepairSucceed(t *testing.T, inMemoryRepair bool) {
|
||||
// - Call checker to add segment to the repair queue
|
||||
// - Modify segment to be expired
|
||||
// - Run the repairer
|
||||
// - Verify segment is no longer in the repair queue.
|
||||
func TestRemoveExpiredSegmentFromQueue(t *testing.T) {
|
||||
// - Verify segment is still in the repair queue. We don't want the data repairer to have any special treatment for expired segments.
|
||||
func TestRepairExpiredSegment(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1,
|
||||
StorageNodeCount: 10,
|
||||
@ -471,7 +471,7 @@ func TestRemoveExpiredSegmentFromQueue(t *testing.T) {
|
||||
// Verify that the segment was removed
|
||||
count, err = satellite.DB.RepairQueue().Count(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, count)
|
||||
require.Equal(t, 1, count)
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -142,12 +142,6 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
|
||||
return true, invalidRepairError.New("cannot repair inline segment")
|
||||
}
|
||||
|
||||
// TODO how to deal with expiration date for segment
|
||||
if object.ExpiresAt != nil && object.ExpiresAt.Before(repairer.nowFn().UTC()) {
|
||||
mon.Meter("repair_expired").Mark(1) //mon:locked
|
||||
return true, nil
|
||||
}
|
||||
|
||||
mon.Meter("repair_attempts").Mark(1) //mon:locked
|
||||
mon.IntVal("repair_segment_size").Observe(int64(segment.EncryptedSize)) //mon:locked
|
||||
|
||||
|
@ -84,6 +84,7 @@ func recordPaystubs(ctx context.Context, tx *dbx.Tx, paystubs []compensation.Pay
|
||||
dbx.StoragenodePaystub_Owed(paystub.Owed.Value()),
|
||||
dbx.StoragenodePaystub_Disposed(paystub.Disposed.Value()),
|
||||
dbx.StoragenodePaystub_Paid(paystub.Paid.Value()),
|
||||
dbx.StoragenodePaystub_Distributed(paystub.Distributed.Value()),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -69,10 +69,6 @@ type Options struct {
|
||||
APIKeysLRUOptions cache.Options
|
||||
RevocationLRUOptions cache.Options
|
||||
|
||||
// How many records to read in a single transaction when asked for all of the
|
||||
// billable bandwidth from the reported serials table.
|
||||
ReportedRollupsReadBatchSize int
|
||||
|
||||
// How many storage node rollups to save/read in one batch.
|
||||
SaveRollupBatchSize int
|
||||
ReadRollupBatchSize int
|
||||
@ -259,7 +255,7 @@ func (dbc *satelliteDBCollection) Rewards() rewards.DB {
|
||||
// Orders returns database for storing orders.
|
||||
func (dbc *satelliteDBCollection) Orders() orders.DB {
|
||||
db := dbc.getByName("orders")
|
||||
return &ordersDB{db: db, reportedRollupsReadBatchSize: db.opts.ReportedRollupsReadBatchSize}
|
||||
return &ordersDB{db: db}
|
||||
}
|
||||
|
||||
// Containment returns database for storing pending audit info.
|
||||
|
@ -449,111 +449,6 @@ read all (
|
||||
orderby asc api_key.name
|
||||
)
|
||||
|
||||
//--- tracking serial numbers ---//
|
||||
|
||||
model serial_number (
|
||||
key id
|
||||
index (
|
||||
name serial_number_index
|
||||
fields serial_number
|
||||
unique
|
||||
)
|
||||
|
||||
field id serial
|
||||
field serial_number blob
|
||||
|
||||
field bucket_id blob
|
||||
field expires_at timestamp
|
||||
|
||||
index (
|
||||
fields expires_at
|
||||
)
|
||||
)
|
||||
|
||||
read one (
|
||||
select serial_number.bucket_id
|
||||
where serial_number.serial_number = ?
|
||||
)
|
||||
|
||||
model used_serial (
|
||||
key serial_number_id storage_node_id
|
||||
|
||||
field serial_number_id serial_number.id cascade
|
||||
field storage_node_id blob
|
||||
)
|
||||
|
||||
// inserting a new serial number
|
||||
create serial_number ( noreturn )
|
||||
|
||||
// finding out information about the serial number
|
||||
read scalar (
|
||||
select serial_number
|
||||
where serial_number.serial_number = ?
|
||||
)
|
||||
|
||||
// deleting expired serial numbers
|
||||
delete serial_number (
|
||||
where serial_number.expires_at <= ?
|
||||
)
|
||||
|
||||
// for preventing duplicate serial numbers
|
||||
create used_serial ( noreturn )
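
As a rough sketch of how these dbx definitions surface in Go: the `create ... ( noreturn )` and `read scalar` statements generate accessors such as CreateNoReturn_SerialNumber and Find_SerialNumber_By_SerialNumber, which appear later in this diff. The helper below is illustrative only; it assumes a *dbx.DB handle and the imports already used by the orders code in this change (context, time, storj.io/common/storj, and the generated satellitedb/dbx package).

// createAndLookupSerial is an illustrative helper, not part of this change.
func createAndLookupSerial(ctx context.Context, db *dbx.DB, serial storj.SerialNumber, bucketID []byte, expiresAt time.Time) ([]byte, error) {
	// Generated by `create serial_number ( noreturn )`.
	err := db.CreateNoReturn_SerialNumber(ctx,
		dbx.SerialNumber_SerialNumber(serial.Bytes()),
		dbx.SerialNumber_BucketId(bucketID),
		dbx.SerialNumber_ExpiresAt(expiresAt.UTC()),
	)
	if err != nil {
		return nil, err
	}

	// Generated by the `read scalar ( select serial_number ... )` statement.
	row, err := db.Find_SerialNumber_By_SerialNumber(ctx,
		dbx.SerialNumber_SerialNumber(serial.Bytes()))
	if err != nil || row == nil {
		return nil, err
	}
	return row.BucketId, nil
}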
|
||||
|
||||
//
|
||||
// DEPRECATED! vvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
//
|
||||
|
||||
model reported_serial (
|
||||
key expires_at storage_node_id bucket_id action serial_number
|
||||
|
||||
field expires_at timestamp // ceil'd to the next day
|
||||
field storage_node_id blob // involved node id
|
||||
field bucket_id blob // involved project id and bucket name
|
||||
field action uint // action (get, put, audit, etc. see protobuf)
|
||||
field serial_number blob // identifies order (required for uniqueness)
|
||||
|
||||
field settled uint64
|
||||
field observed_at timestamp
|
||||
)
|
||||
|
||||
//
|
||||
// DEPRECATED! ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
//
|
||||
|
||||
model pending_serial_queue (
|
||||
table pending_serial_queue
|
||||
|
||||
key storage_node_id bucket_id serial_number
|
||||
field storage_node_id blob
|
||||
field bucket_id blob
|
||||
field serial_number blob
|
||||
|
||||
field action uint
|
||||
field settled uint64
|
||||
field expires_at timestamp
|
||||
)
|
||||
|
||||
create pending_serial_queue ( noreturn, replace )
|
||||
read paged ( select pending_serial_queue )
|
||||
|
||||
model consumed_serial (
|
||||
key storage_node_id serial_number
|
||||
index ( fields expires_at )
|
||||
|
||||
field storage_node_id blob
|
||||
field serial_number blob
|
||||
field expires_at timestamp
|
||||
)
|
||||
|
||||
create consumed_serial ( noreturn )
|
||||
delete consumed_serial ( where consumed_serial.expires_at <= ? )
|
||||
|
||||
read has (
|
||||
select consumed_serial
|
||||
where consumed_serial.storage_node_id = ?
|
||||
where consumed_serial.serial_number = ?
|
||||
)
|
||||
|
||||
// --- bucket accounting tables --- //
|
||||
|
||||
model bucket_bandwidth_rollup (
|
||||
@ -587,6 +482,39 @@ read scalar (
|
||||
where bucket_bandwidth_rollup.action = ?
|
||||
)
|
||||
|
||||
read paged (
|
||||
select bucket_bandwidth_rollup
|
||||
where bucket_bandwidth_rollup.interval_start >= ?
|
||||
)
|
||||
|
||||
model bucket_bandwidth_rollup_archive (
|
||||
key bucket_name project_id interval_start action
|
||||
index (
|
||||
name bucket_bandwidth_rollups_archive_project_id_action_interval_index
|
||||
fields project_id action interval_start
|
||||
)
|
||||
index (
|
||||
name bucket_bandwidth_rollups_archive_action_interval_project_id_index
|
||||
fields action interval_start project_id
|
||||
)
|
||||
|
||||
field bucket_name blob
|
||||
field project_id blob
|
||||
|
||||
field interval_start timestamp
|
||||
field interval_seconds uint
|
||||
field action uint
|
||||
|
||||
field inline uint64 ( updatable )
|
||||
field allocated uint64 ( updatable )
|
||||
field settled uint64 ( updatable )
|
||||
)
|
||||
|
||||
read paged (
|
||||
select bucket_bandwidth_rollup_archive
|
||||
where bucket_bandwidth_rollup_archive.interval_start >= ?
|
||||
)
|
||||
|
||||
model revocation (
|
||||
key revoked
|
||||
field revoked blob
|
||||
@ -680,12 +608,39 @@ read all (
|
||||
where storagenode_bandwidth_rollup.interval_start = ?
|
||||
)
|
||||
|
||||
read paged (
|
||||
select storagenode_bandwidth_rollup
|
||||
where storagenode_bandwidth_rollup.interval_start >= ?
|
||||
)
|
||||
|
||||
read paged (
|
||||
select storagenode_bandwidth_rollup
|
||||
where storagenode_bandwidth_rollup.storagenode_id = ?
|
||||
where storagenode_bandwidth_rollup.interval_start >= ?
|
||||
)
|
||||
|
||||
model storagenode_bandwidth_rollup_archive (
|
||||
key storagenode_id interval_start action
|
||||
|
||||
index (
|
||||
name storagenode_bandwidth_rollup_archives_interval_start_index
|
||||
fields interval_start
|
||||
)
|
||||
|
||||
field storagenode_id blob
|
||||
field interval_start timestamp
|
||||
field interval_seconds uint
|
||||
field action uint
|
||||
|
||||
field allocated uint64 ( updatable, nullable, default 0 )
|
||||
field settled uint64 ( updatable )
|
||||
)
|
||||
|
||||
read paged (
|
||||
select storagenode_bandwidth_rollup_archive
|
||||
where storagenode_bandwidth_rollup_archive.interval_start >= ?
|
||||
)
|
||||
|
||||
///////////////////////////////////////
|
||||
// orders phase2->phase3 rollout table
|
||||
///////////////////////////////////////
|
||||
@ -770,10 +725,11 @@ model storagenode_paystub (
|
||||
|
||||
field surge_percent int64 // percentage
|
||||
|
||||
field held int64 // in micro-units of currency
|
||||
field owed int64 // in micro-units of currency
|
||||
field disposed int64 // in micro-units of currency
|
||||
field paid int64 // in micro-units of currency
|
||||
field held int64 // in micro-units of currency
|
||||
field owed int64 // in micro-units of currency
|
||||
field disposed int64 // in micro-units of currency
|
||||
field paid int64 // in micro-units of currency
|
||||
field distributed int64 // in micro-units of currency
|
||||
)
|
||||
|
||||
create storagenode_paystub ( noreturn )
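
Since every monetary column in storagenode_paystub is documented as micro-units of currency, here is a tiny illustrative conversion helper, assuming "micro" means 10^-6 of the currency unit as the comments imply.

// microToUnits converts a micro-unit amount (e.g. the held/owed/disposed/paid/
// distributed columns above) into whole currency units. Illustration only.
func microToUnits(micro int64) float64 {
	return float64(micro) / 1e6
}

// Example: 12_345_678 micro-units -> 12.345678 currency units.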
|
||||
|
File diff suppressed because it is too large
@ -32,6 +32,17 @@ CREATE TABLE bucket_bandwidth_rollups (
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollup_archives (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
@ -56,12 +67,6 @@ CREATE TABLE coinpayments_transactions (
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE consumed_serials (
|
||||
storage_node_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, serial_number )
|
||||
);
|
||||
CREATE TABLE coupons (
|
||||
id bytea NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
@ -204,15 +209,6 @@ CREATE TABLE pending_audits (
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_serial_queue (
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
@ -240,16 +236,6 @@ CREATE TABLE registration_tokens (
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reported_serials (
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
observed_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
@ -262,13 +248,6 @@ CREATE TABLE revocations (
|
||||
api_key_id bytea NOT NULL,
|
||||
PRIMARY KEY ( revoked )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
@ -278,6 +257,15 @@ CREATE TABLE storagenode_bandwidth_rollups (
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollup_archives (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
@ -319,6 +307,7 @@ CREATE TABLE storagenode_paystubs (
|
||||
owed bigint NOT NULL,
|
||||
disposed bigint NOT NULL,
|
||||
paid bigint NOT NULL,
|
||||
distributed bigint NOT NULL,
|
||||
PRIMARY KEY ( period, node_id )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
@ -421,11 +410,6 @@ CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE TABLE user_credits (
|
||||
id serial NOT NULL,
|
||||
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
@ -442,17 +426,17 @@ CREATE TABLE user_credits (
|
||||
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
||||
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
|
||||
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
|
||||
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
|
||||
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
|
||||
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
|
||||
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
|
||||
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
||||
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
||||
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
||||
|
@ -32,6 +32,17 @@ CREATE TABLE bucket_bandwidth_rollups (
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollup_archives (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
@ -56,12 +67,6 @@ CREATE TABLE coinpayments_transactions (
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE consumed_serials (
|
||||
storage_node_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, serial_number )
|
||||
);
|
||||
CREATE TABLE coupons (
|
||||
id bytea NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
@ -204,15 +209,6 @@ CREATE TABLE pending_audits (
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_serial_queue (
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
@ -240,16 +236,6 @@ CREATE TABLE registration_tokens (
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reported_serials (
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
observed_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
@ -262,13 +248,6 @@ CREATE TABLE revocations (
|
||||
api_key_id bytea NOT NULL,
|
||||
PRIMARY KEY ( revoked )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
@ -278,6 +257,15 @@ CREATE TABLE storagenode_bandwidth_rollups (
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollup_archives (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
@ -319,6 +307,7 @@ CREATE TABLE storagenode_paystubs (
|
||||
owed bigint NOT NULL,
|
||||
disposed bigint NOT NULL,
|
||||
paid bigint NOT NULL,
|
||||
distributed bigint NOT NULL,
|
||||
PRIMARY KEY ( period, node_id )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
@ -421,11 +410,6 @@ CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE TABLE user_credits (
|
||||
id serial NOT NULL,
|
||||
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
@ -442,17 +426,17 @@ CREATE TABLE user_credits (
|
||||
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
||||
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
|
||||
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
|
||||
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
|
||||
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
|
||||
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
|
||||
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
|
||||
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
||||
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
||||
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
||||
|
@ -153,6 +153,36 @@ func (db *gracefulexitDB) DeleteFinishedTransferQueueItems(ctx context.Context,
|
||||
return Error.Wrap(err)
|
||||
}
|
||||
|
||||
// DeleteAllFinishedTransferQueueItems deletes all graceful exit transfer
// queue items whose nodes have finished the exit before the indicated time,
// returning the total number of deleted items.
|
||||
func (db *gracefulexitDB) DeleteAllFinishedTransferQueueItems(
|
||||
ctx context.Context, before time.Time) (_ int64, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
statement := db.db.Rebind(
|
||||
`DELETE FROM graceful_exit_transfer_queue
|
||||
WHERE node_id IN (
|
||||
SELECT id
|
||||
FROM nodes
|
||||
WHERE exit_finished_at IS NOT NULL
|
||||
AND exit_finished_at < ?
|
||||
)`,
|
||||
)
|
||||
|
||||
res, err := db.db.ExecContext(ctx, statement, before)
|
||||
if err != nil {
|
||||
return 0, Error.Wrap(err)
|
||||
}
|
||||
|
||||
count, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, Error.Wrap(err)
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetTransferQueueItem gets a graceful exit transfer queue entry.
|
||||
func (db *gracefulexitDB) GetTransferQueueItem(ctx context.Context, nodeID storj.NodeID, key metabase.SegmentKey, pieceNum int32) (_ *gracefulexit.TransferQueueItem, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
@ -255,6 +285,45 @@ func (db *gracefulexitDB) IncrementOrderLimitSendCount(ctx context.Context, node
|
||||
return Error.Wrap(err)
|
||||
}
|
||||
|
||||
// CountFinishedTransferQueueItemsByNode returns a map of the nodes which have
// finished the exit before the indicated time but still have at least one item
// left in the transfer queue.
|
||||
func (db *gracefulexitDB) CountFinishedTransferQueueItemsByNode(ctx context.Context, before time.Time) (_ map[storj.NodeID]int64, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
statement := db.db.Rebind(
|
||||
`SELECT n.id, count(getq.node_id)
|
||||
FROM nodes as n LEFT JOIN graceful_exit_transfer_queue as getq
|
||||
ON n.id = getq.node_id
|
||||
WHERE n.exit_finished_at IS NOT NULL
|
||||
AND n.exit_finished_at < ?
|
||||
GROUP BY n.id
|
||||
HAVING count(getq.node_id) > 0`,
|
||||
)
|
||||
|
||||
rows, err := db.db.QueryContext(ctx, statement, before)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
defer func() { err = errs.Combine(err, Error.Wrap(rows.Close())) }()
|
||||
|
||||
nodesItemsCount := make(map[storj.NodeID]int64)
|
||||
for rows.Next() {
|
||||
var (
|
||||
nodeID storj.NodeID
|
||||
n int64
|
||||
)
|
||||
err := rows.Scan(&nodeID, &n)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
|
||||
nodesItemsCount[nodeID] = n
|
||||
}
|
||||
|
||||
return nodesItemsCount, Error.Wrap(rows.Err())
|
||||
}
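
A brief sketch of how a caller might consume the map returned above (node ID to leftover item count). This assumes the method is exposed on the graceful exit DB interface, as the new test below uses it via planet.Satellites[0].DB.GracefulExit(); the helper name and signature are made up for the example.

// totalLeftoverItems sums the per-node counts reported by
// CountFinishedTransferQueueItemsByNode. Illustration only.
func totalLeftoverItems(ctx context.Context, geDB gracefulexit.DB, before time.Time) (int64, error) {
	perNode, err := geDB.CountFinishedTransferQueueItemsByNode(ctx, before)
	if err != nil {
		return 0, err
	}
	var total int64
	for nodeID, count := range perNode {
		_ = nodeID // each key is a storj.NodeID with at least one queued item left
		total += count
	}
	return total, nil
}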
|
||||
|
||||
func scanRows(rows tagsql.Rows) (transferQueueItemRows []*gracefulexit.TransferQueueItem, err error) {
|
||||
for rows.Next() {
|
||||
transferQueueItem := &gracefulexit.TransferQueueItem{}
|
||||
|
satellite/satellitedb/gracefulexit_test.go (new file, 172 lines)
@ -0,0 +1,172 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package satellitedb_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/testcontext"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite/gracefulexit"
|
||||
"storj.io/storj/satellite/metainfo/metabase"
|
||||
"storj.io/storj/satellite/overlay"
|
||||
)
|
||||
|
||||
func TestGracefulExit_DeleteAllFinishedTransferQueueItems(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 7,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
var (
|
||||
cache = planet.Satellites[0].DB.OverlayCache()
|
||||
currentTime = time.Now()
|
||||
)
|
||||
|
||||
// mark some of the storagenodes as successful exit
|
||||
nodeSuccessful1 := planet.StorageNodes[1]
|
||||
_, err := cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
|
||||
NodeID: nodeSuccessful1.ID(),
|
||||
ExitInitiatedAt: currentTime.Add(-time.Hour),
|
||||
ExitLoopCompletedAt: currentTime.Add(-30 * time.Minute),
|
||||
ExitFinishedAt: currentTime.Add(-25 * time.Minute),
|
||||
ExitSuccess: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeSuccessful2 := planet.StorageNodes[2]
|
||||
_, err = cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
|
||||
NodeID: nodeSuccessful2.ID(),
|
||||
ExitInitiatedAt: currentTime.Add(-time.Hour),
|
||||
ExitLoopCompletedAt: currentTime.Add(-17 * time.Minute),
|
||||
ExitFinishedAt: currentTime.Add(-16 * time.Minute),
|
||||
ExitSuccess: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeSuccessful3 := planet.StorageNodes[3]
|
||||
_, err = cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
|
||||
NodeID: nodeSuccessful3.ID(),
|
||||
ExitInitiatedAt: currentTime.Add(-time.Hour),
|
||||
ExitLoopCompletedAt: currentTime.Add(-9 * time.Minute),
|
||||
ExitFinishedAt: currentTime.Add(-5 * time.Minute),
|
||||
ExitSuccess: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// mark some of the storagenodes as failed exit
|
||||
nodeFailed1 := planet.StorageNodes[4]
|
||||
_, err = cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
|
||||
NodeID: nodeFailed1.ID(),
|
||||
ExitInitiatedAt: currentTime.Add(-time.Hour),
|
||||
ExitLoopCompletedAt: currentTime.Add(-28 * time.Minute),
|
||||
ExitFinishedAt: currentTime.Add(-20 * time.Minute),
|
||||
ExitSuccess: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeFailed2 := planet.StorageNodes[5]
|
||||
_, err = cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
|
||||
NodeID: nodeFailed2.ID(),
|
||||
ExitInitiatedAt: currentTime.Add(-time.Hour),
|
||||
ExitLoopCompletedAt: currentTime.Add(-17 * time.Minute),
|
||||
ExitFinishedAt: currentTime.Add(-15 * time.Minute),
|
||||
ExitSuccess: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeWithoutItems := planet.StorageNodes[6]
|
||||
_, err = cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
|
||||
NodeID: nodeWithoutItems.ID(),
|
||||
ExitInitiatedAt: currentTime.Add(-time.Hour),
|
||||
ExitLoopCompletedAt: currentTime.Add(-35 * time.Minute),
|
||||
ExitFinishedAt: currentTime.Add(-32 * time.Minute),
|
||||
ExitSuccess: false,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// add some items to the transfer queue for the exited nodes
|
||||
queueItems, nodesItems := generateTransferQueueItems(t, []*testplanet.StorageNode{
|
||||
nodeSuccessful1, nodeSuccessful2, nodeSuccessful3, nodeFailed1, nodeFailed2,
|
||||
})
|
||||
|
||||
gracefulExitDB := planet.Satellites[0].DB.GracefulExit()
|
||||
err = gracefulExitDB.Enqueue(ctx, queueItems)
|
||||
require.NoError(t, err)
|
||||
|
||||
// count nodes exited before 15 minutes ago
|
||||
nodes, err := gracefulExitDB.CountFinishedTransferQueueItemsByNode(ctx, currentTime.Add(-15*time.Minute))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, 3, "invalid number of nodes which have exited 15 minutes ago")
|
||||
|
||||
for id, n := range nodes {
|
||||
assert.EqualValues(t, nodesItems[id], n, "unexpected number of items")
|
||||
}
|
||||
|
||||
// count nodes exited before 4 minutes ago
|
||||
nodes, err = gracefulExitDB.CountFinishedTransferQueueItemsByNode(ctx, currentTime.Add(-4*time.Minute))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, 5, "invalid number of nodes which have exited 4 minutes ago")
|
||||
|
||||
for id, n := range nodes {
|
||||
assert.EqualValues(t, nodesItems[id], n, "unexpected number of items")
|
||||
}
|
||||
|
||||
// delete items of nodes exited before 15 minutes ago
|
||||
count, err := gracefulExitDB.DeleteAllFinishedTransferQueueItems(ctx, currentTime.Add(-15*time.Minute))
|
||||
require.NoError(t, err)
|
||||
expectedNumDeletedItems := nodesItems[nodeSuccessful1.ID()] +
|
||||
nodesItems[nodeSuccessful2.ID()] +
|
||||
nodesItems[nodeFailed1.ID()]
|
||||
require.EqualValues(t, expectedNumDeletedItems, count, "invalid number of deleted items")
|
||||
|
||||
// check that only the remaining exited nodes are left with items
|
||||
nodes, err = gracefulExitDB.CountFinishedTransferQueueItemsByNode(ctx, currentTime.Add(time.Minute))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, 2, "invalid number of exited nodes with items")
|
||||
|
||||
for id, n := range nodes {
|
||||
assert.EqualValues(t, nodesItems[id], n, "unexpected number of items")
|
||||
assert.NotEqual(t, nodeSuccessful1.ID(), id, "node shouldn't have items")
|
||||
assert.NotEqual(t, nodeSuccessful2.ID(), id, "node shouldn't have items")
|
||||
assert.NotEqual(t, nodeFailed1.ID(), id, "node shouldn't have items")
|
||||
}
|
||||
|
||||
// delete the items of the rest of the exited nodes
|
||||
count, err = gracefulExitDB.DeleteAllFinishedTransferQueueItems(ctx, currentTime.Add(time.Minute))
|
||||
require.NoError(t, err)
|
||||
expectedNumDeletedItems = nodesItems[nodeSuccessful3.ID()] + nodesItems[nodeFailed2.ID()]
|
||||
require.EqualValues(t, expectedNumDeletedItems, count, "invalid number of deleted items")
|
||||
|
||||
// check that there aren't more exited nodes with items
|
||||
nodes, err = gracefulExitDB.CountFinishedTransferQueueItemsByNode(ctx, currentTime.Add(time.Minute))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, 0, "invalid number of exited nodes with items")
|
||||
})
|
||||
}
|
||||
|
||||
func generateTransferQueueItems(t *testing.T, nodes []*testplanet.StorageNode) ([]gracefulexit.TransferQueueItem, map[storj.NodeID]int64) {
|
||||
getNodeID := func() storj.NodeID {
|
||||
n := rand.Intn(len(nodes))
|
||||
return nodes[n].ID()
|
||||
}
|
||||
|
||||
var (
|
||||
items = make([]gracefulexit.TransferQueueItem, rand.Intn(100)+10)
|
||||
nodesItems = make(map[storj.NodeID]int64, len(items))
|
||||
)
|
||||
for i, item := range items {
|
||||
item.NodeID = getNodeID()
|
||||
item.Key = metabase.SegmentKey{byte(rand.Int31n(256))}
|
||||
item.PieceNum = rand.Int31()
|
||||
items[i] = item
|
||||
nodesItems[item.NodeID]++
|
||||
}
|
||||
|
||||
return items, nodesItems
|
||||
}
|
@ -1212,6 +1212,85 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
DB: &db.migrationDB,
|
||||
Description: "add storagenode_bandwidth_rollups_archives and bucket_bandwidth_rollup_archives",
|
||||
Version: 142,
|
||||
SeparateTx: true,
|
||||
Action: migrate.SQL{
|
||||
`
|
||||
CREATE TABLE storagenode_bandwidth_rollup_archives (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);`,
|
||||
`CREATE TABLE bucket_bandwidth_rollup_archives (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);`,
|
||||
`CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );`,
|
||||
`CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );`,
|
||||
`CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives (interval_start);`,
|
||||
},
|
||||
},
|
||||
{
|
||||
DB: &db.migrationDB,
|
||||
Description: "add distributed column to storagenode_paystubs table",
|
||||
Version: 143,
|
||||
Action: migrate.Func(func(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error {
|
||||
_, err := db.Exec(ctx, `
|
||||
ALTER TABLE storagenode_paystubs ADD COLUMN distributed BIGINT;
|
||||
`)
|
||||
if err != nil {
|
||||
return ErrMigrate.Wrap(err)
|
||||
}
|
||||
|
||||
_, err = db.Exec(ctx, `
|
||||
UPDATE storagenode_paystubs ps
|
||||
SET distributed = coalesce((
|
||||
SELECT sum(amount)::bigint
|
||||
FROM storagenode_payments pm
|
||||
WHERE pm.period = ps.period
|
||||
AND pm.node_id = ps.node_id
|
||||
), 0);
|
||||
`)
|
||||
if err != nil {
|
||||
return ErrMigrate.Wrap(err)
|
||||
}
|
||||
|
||||
_, err = db.Exec(ctx, `
|
||||
ALTER TABLE storagenode_paystubs ALTER COLUMN distributed SET NOT NULL;
|
||||
`)
|
||||
if err != nil {
|
||||
return ErrMigrate.Wrap(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
DB: &db.migrationDB,
|
||||
Description: "delete deprecated and unused serial tables",
|
||||
Version: 144,
|
||||
Action: migrate.SQL{
|
||||
`DROP TABLE used_serials;`,
|
||||
`DROP TABLE reported_serials;`,
|
||||
`DROP TABLE pending_serial_queue;`,
|
||||
`DROP TABLE serial_numbers;`,
|
||||
`DROP TABLE consumed_serials;`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -16,9 +16,7 @@ import (
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/private/dbutil"
|
||||
"storj.io/storj/private/dbutil/pgutil"
|
||||
"storj.io/storj/private/tagsql"
|
||||
"storj.io/storj/satellite/orders"
|
||||
"storj.io/storj/satellite/satellitedb/dbx"
|
||||
)
|
||||
@ -42,51 +40,6 @@ var (
|
||||
|
||||
type ordersDB struct {
|
||||
db *satelliteDB
|
||||
|
||||
reportedRollupsReadBatchSize int
|
||||
}
|
||||
|
||||
// CreateSerialInfo creates a serial number entry in the database.
|
||||
func (db *ordersDB) CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
return db.db.CreateNoReturn_SerialNumber(
|
||||
ctx,
|
||||
dbx.SerialNumber_SerialNumber(serialNumber.Bytes()),
|
||||
dbx.SerialNumber_BucketId(bucketID),
|
||||
dbx.SerialNumber_ExpiresAt(limitExpiration.UTC()),
|
||||
)
|
||||
}
|
||||
|
||||
// UseSerialNumber creates a used serial number entry in database from an
|
||||
// existing serial number.
|
||||
// It returns the bucket ID associated with serialNumber.
|
||||
func (db *ordersDB) UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) (_ []byte, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
statement := db.db.Rebind(
|
||||
`INSERT INTO used_serials (serial_number_id, storage_node_id)
|
||||
SELECT id, ? FROM serial_numbers WHERE serial_number = ?`,
|
||||
)
|
||||
_, err = db.db.ExecContext(ctx, statement, storageNodeID.Bytes(), serialNumber.Bytes())
|
||||
if err != nil {
|
||||
if pgutil.IsConstraintError(err) {
|
||||
return nil, orders.ErrUsingSerialNumber.New("serial number already used")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dbxSerialNumber, err := db.db.Find_SerialNumber_By_SerialNumber(
|
||||
ctx,
|
||||
dbx.SerialNumber_SerialNumber(serialNumber.Bytes()),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dbxSerialNumber == nil {
|
||||
return nil, orders.ErrUsingSerialNumber.New("serial number not found")
|
||||
}
|
||||
return dbxSerialNumber.BucketId, nil
|
||||
}
|
||||
|
||||
// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket.
|
||||
@ -227,236 +180,44 @@ func (db *ordersDB) GetStorageNodeBandwidth(ctx context.Context, nodeID storj.No
|
||||
return sum1 + sum2, nil
|
||||
}
|
||||
|
||||
// UnuseSerialNumber removes the (serial number, storage node id) pair from the database.
|
||||
func (db *ordersDB) UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
statement := `DELETE FROM used_serials WHERE storage_node_id = ? AND
|
||||
serial_number_id IN (SELECT id FROM serial_numbers WHERE serial_number = ?)`
|
||||
_, err = db.db.ExecContext(ctx, db.db.Rebind(statement), storageNodeID.Bytes(), serialNumber.Bytes())
|
||||
return err
|
||||
}
|
||||
|
||||
// ProcessOrders takes a list of order requests and inserts them into the pending serials queue.
|
||||
//
|
||||
// ProcessOrders requires that all orders come from the same storage node.
|
||||
func (db *ordersDB) ProcessOrders(ctx context.Context, requests []*orders.ProcessOrderRequest) (responses []*orders.ProcessOrderResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if len(requests) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// check that all requests are from the same storage node
|
||||
storageNodeID := requests[0].OrderLimit.StorageNodeId
|
||||
for _, req := range requests[1:] {
|
||||
if req.OrderLimit.StorageNodeId != storageNodeID {
|
||||
return nil, ErrDifferentStorageNodes.New("requests from different storage nodes %v and %v", storageNodeID, req.OrderLimit.StorageNodeId)
|
||||
}
|
||||
}
|
||||
|
||||
// Do a read first to get all the project id/bucket ids. We could combine this with the
|
||||
// upsert below by doing a join, but there isn't really any need for special consistency
|
||||
// semantics between these two queries, and it should make things easier on the database
|
||||
// (particularly cockroachDB) to have the freedom to perform them separately.
|
||||
//
|
||||
// We don't expect the serial_number -> bucket_id relationship ever to change, as long as a
|
||||
// serial_number exists. There is a possibility of a serial_number being deleted between
|
||||
// this query and the next, but that is ok too (rows in reported_serials may end up having
|
||||
// serial numbers that no longer exist in serial_numbers, but that shouldn't break
|
||||
// anything.)
|
||||
bucketIDs, err := func() (bucketIDs [][]byte, err error) {
|
||||
bucketIDs = make([][]byte, len(requests))
|
||||
serialNums := make([][]byte, len(requests))
|
||||
for i, request := range requests {
|
||||
serialNums[i] = request.Order.SerialNumber.Bytes()
|
||||
}
|
||||
rows, err := db.db.QueryContext(ctx, `
|
||||
SELECT request.i, sn.bucket_id
|
||||
FROM
|
||||
serial_numbers sn,
|
||||
unnest($1::bytea[]) WITH ORDINALITY AS request(serial_number, i)
|
||||
WHERE request.serial_number = sn.serial_number
|
||||
`, pgutil.ByteaArray(serialNums))
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
defer func() { err = errs.Combine(err, rows.Err(), rows.Close()) }()
|
||||
for rows.Next() {
|
||||
var index int
|
||||
var bucketID []byte
|
||||
err = rows.Scan(&index, &bucketID)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
bucketIDs[index-1] = bucketID
|
||||
}
|
||||
return bucketIDs, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
|
||||
// perform all of the upserts into pending_serial_queue table
|
||||
expiresAtArray := make([]time.Time, 0, len(requests))
|
||||
bucketIDArray := make([][]byte, 0, len(requests))
|
||||
actionArray := make([]pb.PieceAction, 0, len(requests))
|
||||
serialNumArray := make([][]byte, 0, len(requests))
|
||||
settledArray := make([]int64, 0, len(requests))
|
||||
|
||||
// remove duplicate bucket_id, serial_number pairs sent in the same request.
|
||||
// postgres will complain.
|
||||
type requestKey struct {
|
||||
BucketID string
|
||||
SerialNumber storj.SerialNumber
|
||||
}
|
||||
seenRequests := make(map[requestKey]struct{})
|
||||
|
||||
for i, request := range requests {
|
||||
if bucketIDs[i] == nil {
|
||||
responses = append(responses, &orders.ProcessOrderResponse{
|
||||
SerialNumber: request.Order.SerialNumber,
|
||||
Status: pb.SettlementResponse_REJECTED,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter duplicate requests and reject them.
|
||||
key := requestKey{
|
||||
BucketID: string(bucketIDs[i]),
|
||||
SerialNumber: request.Order.SerialNumber,
|
||||
}
|
||||
if _, seen := seenRequests[key]; seen {
|
||||
responses = append(responses, &orders.ProcessOrderResponse{
|
||||
SerialNumber: request.Order.SerialNumber,
|
||||
Status: pb.SettlementResponse_REJECTED,
|
||||
})
|
||||
continue
|
||||
}
|
||||
seenRequests[key] = struct{}{}
|
||||
|
||||
expiresAtArray = append(expiresAtArray, request.OrderLimit.OrderExpiration)
|
||||
bucketIDArray = append(bucketIDArray, bucketIDs[i])
|
||||
actionArray = append(actionArray, request.OrderLimit.Action)
|
||||
serialNumCopy := request.Order.SerialNumber
|
||||
serialNumArray = append(serialNumArray, serialNumCopy[:])
|
||||
settledArray = append(settledArray, request.Order.Amount)
|
||||
|
||||
responses = append(responses, &orders.ProcessOrderResponse{
|
||||
SerialNumber: request.Order.SerialNumber,
|
||||
Status: pb.SettlementResponse_ACCEPTED,
|
||||
})
|
||||
}
|
||||
|
||||
var stmt string
|
||||
switch db.db.implementation {
|
||||
case dbutil.Postgres:
|
||||
stmt = `
|
||||
INSERT INTO pending_serial_queue (
|
||||
storage_node_id, bucket_id, serial_number, action, settled, expires_at
|
||||
)
|
||||
SELECT
|
||||
$1::bytea,
|
||||
unnest($2::bytea[]),
|
||||
unnest($3::bytea[]),
|
||||
unnest($4::int4[]),
|
||||
unnest($5::bigint[]),
|
||||
unnest($6::timestamptz[])
|
||||
ON CONFLICT ( storage_node_id, bucket_id, serial_number )
|
||||
DO UPDATE SET
|
||||
action = EXCLUDED.action,
|
||||
settled = EXCLUDED.settled,
|
||||
expires_at = EXCLUDED.expires_at
|
||||
`
|
||||
case dbutil.Cockroach:
|
||||
stmt = `
|
||||
UPSERT INTO pending_serial_queue (
|
||||
storage_node_id, bucket_id, serial_number, action, settled, expires_at
|
||||
)
|
||||
SELECT
|
||||
$1::bytea,
|
||||
unnest($2::bytea[]),
|
||||
unnest($3::bytea[]),
|
||||
unnest($4::int4[]),
|
||||
unnest($5::bigint[]),
|
||||
unnest($6::timestamptz[])
|
||||
`
|
||||
default:
|
||||
return nil, Error.New("invalid dbType: %v", db.db.driver)
|
||||
}
|
||||
|
||||
actionNumArray := make([]int32, len(actionArray))
|
||||
for i, num := range actionArray {
|
||||
actionNumArray[i] = int32(num)
|
||||
}
|
||||
|
||||
_, err = db.db.ExecContext(ctx, stmt,
|
||||
storageNodeID.Bytes(),
|
||||
pgutil.ByteaArray(bucketIDArray),
|
||||
pgutil.ByteaArray(serialNumArray),
|
||||
pgutil.Int4Array(actionNumArray),
|
||||
pgutil.Int8Array(settledArray),
|
||||
pgutil.TimestampTZArray(expiresAtArray),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
return responses, nil
|
||||
}
|
||||
|
||||
//
|
||||
// transaction/batch methods
|
||||
//
|
||||
|
||||
type ordersDBTx struct {
|
||||
tx *dbx.Tx
|
||||
db *satelliteDB
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
func (db *ordersDB) WithTransaction(ctx context.Context, cb func(ctx context.Context, tx orders.Transaction) error) (err error) {
|
||||
func (db *ordersDB) UpdateBucketBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []orders.BucketBandwidthRollup) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
return db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
|
||||
return cb(ctx, &ordersDBTx{tx: tx, db: db.db, log: db.db.log})
|
||||
})
|
||||
}
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
func (tx *ordersDBTx) UpdateBucketBandwidthBatch(ctx context.Context, intervalStart time.Time, rollups []orders.BucketBandwidthRollup) (err error) {
	defer mon.Task()(&ctx)(&err)

	if len(rollups) == 0 {
		return nil
	}

	orders.SortBucketBandwidthRollups(rollups)

	intervalStart = intervalStart.UTC()
	intervalStart = time.Date(intervalStart.Year(), intervalStart.Month(), intervalStart.Day(), intervalStart.Hour(), 0, 0, 0, time.UTC)

	var bucketNames [][]byte
	var projectIDs [][]byte
	var actionSlice []int32
	var inlineSlice []int64
	var allocatedSlice []int64
	var settledSlice []int64
	projectRUMap := make(map[string]int64)

	for _, rollup := range rollups {
		rollup := rollup
		bucketNames = append(bucketNames, []byte(rollup.BucketName))
		projectIDs = append(projectIDs, rollup.ProjectID[:])
		actionSlice = append(actionSlice, int32(rollup.Action))
		inlineSlice = append(inlineSlice, rollup.Inline)
		allocatedSlice = append(allocatedSlice, rollup.Allocated)
		settledSlice = append(settledSlice, rollup.Settled)

		if rollup.Action == pb.PieceAction_GET {
			projectRUMap[rollup.ProjectID.String()] += rollup.Allocated
		}
	}

	_, err = tx.tx.Tx.ExecContext(ctx, `
		INSERT INTO bucket_bandwidth_rollups (
			bucket_name, project_id,
			interval_start, interval_seconds,
@ -470,280 +231,47 @@ func (tx *ordersDBTx) UpdateBucketBandwidthBatch(ctx context.Context, intervalSt
			allocated = bucket_bandwidth_rollups.allocated + EXCLUDED.allocated,
			inline = bucket_bandwidth_rollups.inline + EXCLUDED.inline,
			settled = bucket_bandwidth_rollups.settled + EXCLUDED.settled`,
		pgutil.ByteaArray(bucketNames), pgutil.ByteaArray(projectIDs),
		intervalStart, defaultIntervalSeconds,
		pgutil.Int4Array(actionSlice), pgutil.Int8Array(inlineSlice), pgutil.Int8Array(allocatedSlice), pgutil.Int8Array(settledSlice))
	if err != nil {
		tx.log.Error("Bucket bandwidth rollup batch flush failed.", zap.Error(err))
	}

	var projectRUIDs [][]byte
	var projectRUAllocated []int64
	projectInterval := time.Date(intervalStart.Year(), intervalStart.Month(), 1, intervalStart.Hour(), 0, 0, 0, time.UTC)

	for k, v := range projectRUMap {
		projectID, err := uuid.FromString(k)
		if err != nil {
			tx.log.Error("Could not parse project UUID.", zap.Error(err))
			continue
		}
		projectRUIDs = append(projectRUIDs, projectID[:])
		projectRUAllocated = append(projectRUAllocated, v)
	}

	if len(projectRUIDs) > 0 {
		_, err = tx.tx.Tx.ExecContext(ctx, `
			INSERT INTO project_bandwidth_rollups(project_id, interval_month, egress_allocated)
			SELECT unnest($1::bytea[]), $2, unnest($3::bigint[])
			ON CONFLICT(project_id, interval_month)
			DO UPDATE SET egress_allocated = project_bandwidth_rollups.egress_allocated + EXCLUDED.egress_allocated::bigint;
		`,
			pgutil.ByteaArray(projectRUIDs), projectInterval, pgutil.Int8Array(projectRUAllocated))
		if err != nil {
			tx.log.Error("Project bandwidth rollup batch flush failed.", zap.Error(err))
		}
	}
	return err
}

func (tx *ordersDBTx) UpdateStoragenodeBandwidthBatchPhase2(ctx context.Context, intervalStart time.Time, rollups []orders.StoragenodeBandwidthRollup) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if len(rollups) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
orders.SortStoragenodeBandwidthRollups(rollups)
|
||||
|
||||
var storageNodeIDs []storj.NodeID
|
||||
var actionSlice []int32
|
||||
var allocatedSlice []int64
|
||||
var settledSlice []int64
|
||||
|
||||
intervalStart = intervalStart.UTC()
|
||||
intervalStart = time.Date(intervalStart.Year(), intervalStart.Month(), intervalStart.Day(), intervalStart.Hour(), 0, 0, 0, time.UTC)
|
||||
|
||||
for i := range rollups {
|
||||
rollup := &rollups[i]
|
||||
storageNodeIDs = append(storageNodeIDs, rollup.NodeID)
|
||||
actionSlice = append(actionSlice, int32(rollup.Action))
|
||||
allocatedSlice = append(allocatedSlice, rollup.Allocated)
|
||||
settledSlice = append(settledSlice, rollup.Settled)
|
||||
}
|
||||
|
||||
_, err = tx.tx.Tx.ExecContext(ctx, `
|
||||
INSERT INTO storagenode_bandwidth_rollups_phase2(
|
||||
storagenode_id,
|
||||
interval_start, interval_seconds,
|
||||
action, allocated, settled)
|
||||
SELECT
|
||||
unnest($1::bytea[]),
|
||||
$2, $3,
|
||||
unnest($4::int4[]), unnest($5::bigint[]), unnest($6::bigint[])
|
||||
ON CONFLICT(storagenode_id, interval_start, action)
|
||||
DO UPDATE SET
|
||||
allocated = storagenode_bandwidth_rollups_phase2.allocated + EXCLUDED.allocated,
|
||||
settled = storagenode_bandwidth_rollups_phase2.settled + EXCLUDED.settled`,
|
||||
pgutil.NodeIDArray(storageNodeIDs),
|
||||
intervalStart, defaultIntervalSeconds,
|
||||
pgutil.Int4Array(actionSlice), pgutil.Int8Array(allocatedSlice), pgutil.Int8Array(settledSlice))
|
||||
if err != nil {
|
||||
tx.log.Error("Storagenode bandwidth rollup batch flush failed.", zap.Error(err))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateConsumedSerialsBatch creates a batch of consumed serial entries.
|
||||
func (tx *ordersDBTx) CreateConsumedSerialsBatch(ctx context.Context, consumedSerials []orders.ConsumedSerial) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if len(consumedSerials) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var storageNodeIDSlice [][]byte
|
||||
var serialNumberSlice [][]byte
|
||||
var expiresAtSlice []time.Time
|
||||
|
||||
for _, consumedSerial := range consumedSerials {
|
||||
consumedSerial := consumedSerial
|
||||
storageNodeIDSlice = append(storageNodeIDSlice, consumedSerial.NodeID.Bytes())
|
||||
serialNumberSlice = append(serialNumberSlice, consumedSerial.SerialNumber.Bytes())
|
||||
expiresAtSlice = append(expiresAtSlice, consumedSerial.ExpiresAt)
|
||||
}
|
||||
|
||||
var stmt string
|
||||
switch tx.db.implementation {
|
||||
case dbutil.Postgres:
|
||||
stmt = `
|
||||
INSERT INTO consumed_serials (
|
||||
storage_node_id, serial_number, expires_at
|
||||
)
|
||||
SELECT unnest($1::bytea[]), unnest($2::bytea[]), unnest($3::timestamptz[])
|
||||
ON CONFLICT ( storage_node_id, serial_number ) DO NOTHING
|
||||
`
|
||||
case dbutil.Cockroach:
|
||||
stmt = `
|
||||
UPSERT INTO consumed_serials (
|
||||
storage_node_id, serial_number, expires_at
|
||||
)
|
||||
SELECT unnest($1::bytea[]), unnest($2::bytea[]), unnest($3::timestamptz[])
|
||||
`
|
||||
default:
|
||||
return Error.New("invalid dbType: %v", tx.db.driver)
|
||||
}
|
||||
|
||||
_, err = tx.tx.Tx.ExecContext(ctx, stmt,
|
||||
pgutil.ByteaArray(storageNodeIDSlice),
|
||||
pgutil.ByteaArray(serialNumberSlice),
|
||||
pgutil.TimestampTZArray(expiresAtSlice),
|
||||
)
|
||||
return Error.Wrap(err)
|
||||
}
|
||||
|
||||
func (tx *ordersDBTx) HasConsumedSerial(ctx context.Context, nodeID storj.NodeID, serialNumber storj.SerialNumber) (exists bool, err error) {
	defer mon.Task()(&ctx)(&err)

	exists, err = tx.tx.Has_ConsumedSerial_By_StorageNodeId_And_SerialNumber(ctx,
		dbx.ConsumedSerial_StorageNodeId(nodeID.Bytes()),
		dbx.ConsumedSerial_SerialNumber(serialNumber.Bytes()))
	return exists, Error.Wrap(err)
}

//
// transaction/batch methods
//

type rawPendingSerial struct {
	nodeID       []byte
	bucketID     []byte
	serialNumber []byte
}

type ordersDBQueue struct {
	impl dbutil.Implementation
	log  *zap.Logger
	tx   tagsql.Tx
}

func (db *ordersDB) WithQueue(ctx context.Context, cb func(ctx context.Context, queue orders.Queue) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	return Error.Wrap(db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
		return cb(ctx, &ordersDBQueue{
			impl: db.db.implementation,
			log:  db.db.log,
			tx:   tx.Tx,
		})
	}))
}

func (queue *ordersDBQueue) GetPendingSerialsBatch(ctx context.Context, size int) (pendingSerials []orders.PendingSerial, done bool, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
// TODO: no idea if this query makes sense on cockroach. it may do a terrible job with it.
|
||||
// but it's blazing fast on postgres and that's where we have the problem! :D :D :D
|
||||
|
||||
var rows tagsql.Rows
|
||||
switch queue.impl {
|
||||
case dbutil.Postgres:
|
||||
rows, err = queue.tx.Query(ctx, `
|
||||
DELETE
|
||||
FROM pending_serial_queue
|
||||
WHERE
|
||||
ctid = any (array(
|
||||
SELECT
|
||||
ctid
|
||||
FROM pending_serial_queue
|
||||
LIMIT $1
|
||||
))
|
||||
RETURNING storage_node_id, bucket_id, serial_number, action, settled, expires_at, (
|
||||
coalesce((
|
||||
SELECT 1
|
||||
FROM consumed_serials
|
||||
WHERE
|
||||
consumed_serials.storage_node_id = pending_serial_queue.storage_node_id
|
||||
AND consumed_serials.serial_number = pending_serial_queue.serial_number
|
||||
), 0))
|
||||
`, size)
|
||||
case dbutil.Cockroach:
|
||||
rows, err = queue.tx.Query(ctx, `
|
||||
DELETE
|
||||
FROM pending_serial_queue
|
||||
WHERE
|
||||
(storage_node_id, bucket_id, serial_number) = any (array(
|
||||
SELECT
|
||||
(storage_node_id, bucket_id, serial_number)
|
||||
FROM pending_serial_queue
|
||||
LIMIT $1
|
||||
))
|
||||
RETURNING storage_node_id, bucket_id, serial_number, action, settled, expires_at, (
|
||||
coalesce((
|
||||
SELECT 1
|
||||
FROM consumed_serials
|
||||
WHERE
|
||||
consumed_serials.storage_node_id = pending_serial_queue.storage_node_id
|
||||
AND consumed_serials.serial_number = pending_serial_queue.serial_number
|
||||
), 0))
|
||||
`, size)
|
||||
default:
|
||||
return nil, false, Error.New("unhandled implementation")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, false, Error.Wrap(err)
|
||||
}
|
||||
defer func() { err = errs.Combine(err, Error.Wrap(rows.Close())) }()
|
||||
|
||||
for rows.Next() {
|
||||
var consumed int
|
||||
var rawPending rawPendingSerial
|
||||
var pendingSerial orders.PendingSerial
|
||||
|
||||
err := rows.Scan(
|
||||
&rawPending.nodeID,
|
||||
&rawPending.bucketID,
|
||||
&rawPending.serialNumber,
|
||||
&pendingSerial.Action,
|
||||
&pendingSerial.Settled,
|
||||
&pendingSerial.ExpiresAt,
|
||||
&consumed,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, false, Error.Wrap(err)
|
||||
}
|
||||
|
||||
size--
|
||||
|
||||
if consumed != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
pendingSerial.NodeID, err = storj.NodeIDFromBytes(rawPending.nodeID)
|
||||
if err != nil {
|
||||
queue.log.Error("Invalid storage node id in pending serials queue",
|
||||
zap.Binary("id", rawPending.nodeID),
|
||||
zap.Error(errs.Wrap(err)))
|
||||
continue
|
||||
}
|
||||
pendingSerial.BucketID = rawPending.bucketID
|
||||
pendingSerial.SerialNumber, err = storj.SerialNumberFromBytes(rawPending.serialNumber)
|
||||
if err != nil {
|
||||
queue.log.Error("Invalid serial number in pending serials queue",
|
||||
zap.Binary("id", rawPending.serialNumber),
|
||||
zap.Error(errs.Wrap(err)))
|
||||
continue
|
||||
}
|
||||
|
||||
pendingSerials = append(pendingSerials, pendingSerial)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, false, Error.Wrap(err)
|
||||
}
|
||||
|
||||
return pendingSerials, size > 0, nil
|
||||
}
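// NOTE (illustrative sketch, not part of this change): the ctid-based
// DELETE ... RETURNING above dequeues pending serials in batches, and the
// returned `done` flag is true once a batch comes back short, so a caller
// is expected to keep requesting batches until `done` is reported. A
// minimal consumer loop, assuming only that the orders.DB interface exposes
// the WithQueue/GetPendingSerialsBatch signatures shown in this diff (the
// drainPendingSerials helper and its handle callback are hypothetical):
//
//	func drainPendingSerials(ctx context.Context, db orders.DB, batchSize int, handle func(orders.PendingSerial) error) error {
//		return db.WithQueue(ctx, func(ctx context.Context, queue orders.Queue) error {
//			for {
//				pending, done, err := queue.GetPendingSerialsBatch(ctx, batchSize)
//				if err != nil {
//					return err
//				}
//				for _, serial := range pending {
//					// serials flagged as already consumed are filtered out above
//					if err := handle(serial); err != nil {
//						return err
//					}
//				}
//				if done {
//					return nil
//				}
//			}
//		})
//	}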
|
||||
|
||||
// UpdateStoragenodeBandwidthSettleWithWindow adds a record for each action and settled amount.
|
||||
// If any of these orders already exist in the database, then all of these orders have already been processed.
|
||||
// Orders within a single window may only be processed once to prevent double spending.
|
||||
@ -825,13 +353,3 @@ func SettledAmountsMatch(rows []*dbx.StoragenodeBandwidthRollup, orderActionAmou
|
||||
|
||||
return reflect.DeepEqual(rowsSumByAction, orderActionAmounts)
|
||||
}
|
||||
|
||||
func (db *ordersDB) GetBucketIDFromSerialNumber(ctx context.Context, serialNumber storj.SerialNumber) ([]byte, error) {
|
||||
row, err := db.db.Get_SerialNumber_BucketId_By_SerialNumber(ctx,
|
||||
dbx.SerialNumber_SerialNumber(serialNumber[:]),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, ErrBucketFromSerial.Wrap(err)
|
||||
}
|
||||
return row.BucketId, nil
|
||||
}
|
||||
|
@ -78,6 +78,7 @@ func convertDBXPaystub(dbxPaystub *dbx.StoragenodePaystub) (snopayouts.Paystub,
|
||||
Owed: dbxPaystub.Owed,
|
||||
Disposed: dbxPaystub.Disposed,
|
||||
Paid: dbxPaystub.Paid,
|
||||
Distributed: dbxPaystub.Distributed,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -175,6 +176,7 @@ func (db *snopayoutsDB) TestCreatePaystub(ctx context.Context, stub snopayouts.P
|
||||
dbx.StoragenodePaystub_Owed(stub.Owed),
|
||||
dbx.StoragenodePaystub_Disposed(stub.Disposed),
|
||||
dbx.StoragenodePaystub_Paid(stub.Paid),
|
||||
dbx.StoragenodePaystub_Distributed(stub.Distributed),
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -19,6 +19,7 @@ import (
|
||||
"storj.io/storj/private/dbutil/pgutil"
|
||||
"storj.io/storj/satellite/accounting"
|
||||
"storj.io/storj/satellite/metainfo/metabase"
|
||||
"storj.io/storj/satellite/orders"
|
||||
"storj.io/storj/satellite/satellitedb/dbx"
|
||||
)
|
||||
|
||||
@ -631,6 +632,65 @@ func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid
|
||||
return page, nil
|
||||
}
|
||||
|
||||
// ArchiveRollupsBefore archives rollups older than a given time.
|
||||
func (db *ProjectAccounting) ArchiveRollupsBefore(ctx context.Context, before time.Time, batchSize int) (bucketRollupsDeleted int, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if batchSize <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
switch db.db.implementation {
|
||||
case dbutil.Cockroach:
|
||||
for {
|
||||
row := db.db.QueryRow(ctx, `
|
||||
WITH rollups_to_move AS (
|
||||
DELETE FROM bucket_bandwidth_rollups
|
||||
WHERE interval_start <= $1
|
||||
LIMIT $2 RETURNING *
|
||||
), moved_rollups AS (
|
||||
INSERT INTO bucket_bandwidth_rollup_archives(bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
|
||||
SELECT bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled FROM rollups_to_move
|
||||
RETURNING *
|
||||
)
|
||||
SELECT count(*) FROM moved_rollups
|
||||
`, before, batchSize)
|
||||
|
||||
var rowCount int
|
||||
err = row.Scan(&rowCount)
|
||||
if err != nil {
|
||||
return bucketRollupsDeleted, err
|
||||
}
|
||||
bucketRollupsDeleted += rowCount
|
||||
|
||||
if rowCount < batchSize {
|
||||
break
|
||||
}
|
||||
}
|
||||
case dbutil.Postgres:
|
||||
bwStatement := `
|
||||
WITH rollups_to_move AS (
|
||||
DELETE FROM bucket_bandwidth_rollups
|
||||
WHERE interval_start <= $1
|
||||
RETURNING *
|
||||
), moved_rollups AS (
|
||||
INSERT INTO bucket_bandwidth_rollup_archives(bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled)
|
||||
SELECT bucket_name, project_id, interval_start, interval_seconds, action, inline, allocated, settled FROM rollups_to_move
|
||||
RETURNING *
|
||||
)
|
||||
SELECT count(*) FROM moved_rollups
|
||||
`
|
||||
row := db.db.DB.QueryRow(ctx, bwStatement, before)
|
||||
var rowCount int
|
||||
err = row.Scan(&rowCount)
|
||||
if err != nil {
|
||||
return bucketRollupsDeleted, err
|
||||
}
|
||||
bucketRollupsDeleted = rowCount
|
||||
}
|
||||
return bucketRollupsDeleted, err
|
||||
}
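// NOTE (illustrative sketch, not part of this change): ArchiveRollupsBefore
// moves rows older than `before` from bucket_bandwidth_rollups into
// bucket_bandwidth_rollup_archives and reports how many rows were moved.
// A retention job, assuming only the method signature above (the cutoff
// window, projectAccountingDB variable, and log logger are hypothetical),
// might invoke it like this:
//
//	cutoff := time.Now().Add(-90 * 24 * time.Hour) // hypothetical retention window
//	moved, err := projectAccountingDB.ArchiveRollupsBefore(ctx, cutoff, 1000)
//	if err != nil {
//		log.Error("archiving bucket bandwidth rollups failed", zap.Error(err))
//	} else {
//		log.Info("archived bucket bandwidth rollups", zap.Int("rows", moved))
//	}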
|
||||
|
||||
// getBuckets lists all buckets of a certain project.
|
||||
func (db *ProjectAccounting) getBuckets(ctx context.Context, projectID uuid.UUID) (_ []string, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
@ -679,3 +739,79 @@ func (db *ProjectAccounting) GetProjectLimits(ctx context.Context, projectID uui
|
||||
Bandwidth: row.BandwidthLimit,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetRollupsSince retrieves all bucket bandwidth rollup records since a given time.
|
||||
func (db *ProjectAccounting) GetRollupsSince(ctx context.Context, since time.Time) (bwRollups []orders.BucketBandwidthRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
pageLimit := db.db.opts.ReadRollupBatchSize
|
||||
if pageLimit <= 0 {
|
||||
pageLimit = 10000
|
||||
}
|
||||
|
||||
var cursor *dbx.Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
|
||||
for {
|
||||
dbxRollups, next, err := db.db.Paged_BucketBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx,
|
||||
dbx.BucketBandwidthRollup_IntervalStart(since),
|
||||
pageLimit, cursor)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
cursor = next
|
||||
for _, dbxRollup := range dbxRollups {
|
||||
projectID, err := uuid.FromBytes(dbxRollup.ProjectId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bwRollups = append(bwRollups, orders.BucketBandwidthRollup{
|
||||
ProjectID: projectID,
|
||||
BucketName: string(dbxRollup.BucketName),
|
||||
Action: pb.PieceAction(dbxRollup.Action),
|
||||
Inline: int64(dbxRollup.Inline),
|
||||
Allocated: int64(dbxRollup.Allocated),
|
||||
Settled: int64(dbxRollup.Settled),
|
||||
})
|
||||
}
|
||||
if len(dbxRollups) < pageLimit {
|
||||
return bwRollups, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetArchivedRollupsSince retrieves all archived rollup records since a given time.
|
||||
func (db *ProjectAccounting) GetArchivedRollupsSince(ctx context.Context, since time.Time) (bwRollups []orders.BucketBandwidthRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
pageLimit := db.db.opts.ReadRollupBatchSize
|
||||
if pageLimit <= 0 {
|
||||
pageLimit = 10000
|
||||
}
|
||||
|
||||
var cursor *dbx.Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
|
||||
for {
|
||||
dbxRollups, next, err := db.db.Paged_BucketBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx,
|
||||
dbx.BucketBandwidthRollupArchive_IntervalStart(since),
|
||||
pageLimit, cursor)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
cursor = next
|
||||
for _, dbxRollup := range dbxRollups {
|
||||
projectID, err := uuid.FromBytes(dbxRollup.ProjectId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bwRollups = append(bwRollups, orders.BucketBandwidthRollup{
|
||||
ProjectID: projectID,
|
||||
BucketName: string(dbxRollup.BucketName),
|
||||
Action: pb.PieceAction(dbxRollup.Action),
|
||||
Inline: int64(dbxRollup.Inline),
|
||||
Allocated: int64(dbxRollup.Allocated),
|
||||
Settled: int64(dbxRollup.Settled),
|
||||
})
|
||||
}
|
||||
if len(dbxRollups) < pageLimit {
|
||||
return bwRollups, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -487,3 +487,130 @@ func (db *StoragenodeAccounting) DeleteTalliesBefore(ctx context.Context, latest
|
||||
_, err = db.db.DB.ExecContext(ctx, db.db.Rebind(deleteRawSQL), latestRollup)
|
||||
return err
|
||||
}
|
||||
|
||||
// ArchiveRollupsBefore archives rollups older than a given time.
|
||||
func (db *StoragenodeAccounting) ArchiveRollupsBefore(ctx context.Context, before time.Time, batchSize int) (nodeRollupsDeleted int, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if batchSize <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
switch db.db.implementation {
|
||||
case dbutil.Cockroach:
|
||||
for {
|
||||
row := db.db.QueryRow(ctx, `
|
||||
WITH rollups_to_move AS (
|
||||
DELETE FROM storagenode_bandwidth_rollups
|
||||
WHERE interval_start <= $1
|
||||
LIMIT $2 RETURNING *
|
||||
), moved_rollups AS (
|
||||
INSERT INTO storagenode_bandwidth_rollup_archives SELECT * FROM rollups_to_move RETURNING *
|
||||
)
|
||||
SELECT count(*) FROM moved_rollups
|
||||
`, before, batchSize)
|
||||
|
||||
var rowCount int
|
||||
err = row.Scan(&rowCount)
|
||||
if err != nil {
|
||||
return nodeRollupsDeleted, err
|
||||
}
|
||||
nodeRollupsDeleted += rowCount
|
||||
|
||||
if rowCount < batchSize {
|
||||
break
|
||||
}
|
||||
}
|
||||
case dbutil.Postgres:
|
||||
storagenodeStatement := `
|
||||
WITH rollups_to_move AS (
|
||||
DELETE FROM storagenode_bandwidth_rollups
|
||||
WHERE interval_start <= $1
|
||||
RETURNING *
|
||||
), moved_rollups AS (
|
||||
INSERT INTO storagenode_bandwidth_rollup_archives SELECT * FROM rollups_to_move RETURNING *
|
||||
)
|
||||
SELECT count(*) FROM moved_rollups
|
||||
`
|
||||
row := db.db.DB.QueryRow(ctx, storagenodeStatement, before)
|
||||
var rowCount int
|
||||
err = row.Scan(&rowCount)
|
||||
if err != nil {
|
||||
return nodeRollupsDeleted, err
|
||||
}
|
||||
nodeRollupsDeleted = rowCount
|
||||
}
|
||||
return nodeRollupsDeleted, err
|
||||
}
|
||||
|
||||
// GetRollupsSince retrieves all storagenode bandwidth rollup records since a given time.
|
||||
func (db *StoragenodeAccounting) GetRollupsSince(ctx context.Context, since time.Time) (bwRollups []accounting.StoragenodeBandwidthRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
pageLimit := db.db.opts.ReadRollupBatchSize
|
||||
if pageLimit <= 0 {
|
||||
pageLimit = 10000
|
||||
}
|
||||
|
||||
var cursor *dbx.Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual_Continuation
|
||||
for {
|
||||
dbxRollups, next, err := db.db.Paged_StoragenodeBandwidthRollup_By_IntervalStart_GreaterOrEqual(ctx,
|
||||
dbx.StoragenodeBandwidthRollup_IntervalStart(since),
|
||||
pageLimit, cursor)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
cursor = next
|
||||
for _, dbxRollup := range dbxRollups {
|
||||
id, err := storj.NodeIDFromBytes(dbxRollup.StoragenodeId)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
bwRollups = append(bwRollups, accounting.StoragenodeBandwidthRollup{
|
||||
NodeID: id,
|
||||
IntervalStart: dbxRollup.IntervalStart,
|
||||
Action: dbxRollup.Action,
|
||||
Settled: dbxRollup.Settled,
|
||||
})
|
||||
}
|
||||
if len(dbxRollups) < pageLimit {
|
||||
return bwRollups, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetArchivedRollupsSince retrieves all archived bandwidth rollup records since a given time.
|
||||
func (db *StoragenodeAccounting) GetArchivedRollupsSince(ctx context.Context, since time.Time) (bwRollups []accounting.StoragenodeBandwidthRollup, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
pageLimit := db.db.opts.ReadRollupBatchSize
|
||||
if pageLimit <= 0 {
|
||||
pageLimit = 10000
|
||||
}
|
||||
|
||||
var cursor *dbx.Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual_Continuation
|
||||
for {
|
||||
dbxRollups, next, err := db.db.Paged_StoragenodeBandwidthRollupArchive_By_IntervalStart_GreaterOrEqual(ctx,
|
||||
dbx.StoragenodeBandwidthRollupArchive_IntervalStart(since),
|
||||
pageLimit, cursor)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
cursor = next
|
||||
for _, dbxRollup := range dbxRollups {
|
||||
id, err := storj.NodeIDFromBytes(dbxRollup.StoragenodeId)
|
||||
if err != nil {
|
||||
return nil, Error.Wrap(err)
|
||||
}
|
||||
bwRollups = append(bwRollups, accounting.StoragenodeBandwidthRollup{
|
||||
NodeID: id,
|
||||
IntervalStart: dbxRollup.IntervalStart,
|
||||
Action: dbxRollup.Action,
|
||||
Settled: dbxRollup.Settled,
|
||||
})
|
||||
}
|
||||
if len(dbxRollups) < pageLimit {
|
||||
return bwRollups, nil
|
||||
}
|
||||
}
|
||||
}
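// NOTE (illustrative sketch, not part of this change): because archiving
// moves rows out of storagenode_bandwidth_rollups, a reader that wants the
// full history since a point in time that may predate the archive cutoff
// has to consult both tables. Using only the two getters above (the
// storagenodeAccountingDB variable is hypothetical):
//
//	live, err := storagenodeAccountingDB.GetRollupsSince(ctx, since)
//	if err != nil {
//		return nil, err
//	}
//	archived, err := storagenodeAccountingDB.GetArchivedRollupsSince(ctx, since)
//	if err != nil {
//		return nil, err
//	}
//	all := append(append([]accounting.StoragenodeBandwidthRollup{}, live...), archived...)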
|
||||
|
@ -573,4 +573,4 @@ INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_

INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);

-- NEW DATA --

601 satellite/satellitedb/testdata/postgres.v142.sql vendored Normal file
@ -0,0 +1,601 @@
-- AUTOGENERATED BY storj.io/dbx
|
||||
-- DO NOT EDIT
|
||||
CREATE TABLE accounting_rollups (
|
||||
node_id bytea NOT NULL,
|
||||
start_time timestamp with time zone NOT NULL,
|
||||
put_total bigint NOT NULL,
|
||||
get_total bigint NOT NULL,
|
||||
get_audit_total bigint NOT NULL,
|
||||
get_repair_total bigint NOT NULL,
|
||||
put_repair_total bigint NOT NULL,
|
||||
at_rest_total double precision NOT NULL,
|
||||
PRIMARY KEY ( node_id, start_time )
|
||||
);
|
||||
CREATE TABLE accounting_timestamps (
|
||||
name text NOT NULL,
|
||||
value timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( name )
|
||||
);
|
||||
CREATE TABLE audit_histories (
|
||||
node_id bytea NOT NULL,
|
||||
history bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollups (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_bandwidth_rollup_archives (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
allocated bigint NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE bucket_storage_tallies (
|
||||
bucket_name bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
inline bigint NOT NULL,
|
||||
remote bigint NOT NULL,
|
||||
remote_segments_count integer NOT NULL,
|
||||
inline_segments_count integer NOT NULL,
|
||||
object_count integer NOT NULL,
|
||||
metadata_size bigint NOT NULL,
|
||||
PRIMARY KEY ( bucket_name, project_id, interval_start )
|
||||
);
|
||||
CREATE TABLE coinpayments_transactions (
|
||||
id text NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
address text NOT NULL,
|
||||
amount bytea NOT NULL,
|
||||
received bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
key text NOT NULL,
|
||||
timeout integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE consumed_serials (
|
||||
storage_node_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, serial_number )
|
||||
);
|
||||
CREATE TABLE coupons (
|
||||
id bytea NOT NULL,
|
||||
user_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
description text NOT NULL,
|
||||
type integer NOT NULL,
|
||||
status integer NOT NULL,
|
||||
duration bigint NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE coupon_usages (
|
||||
coupon_id bytea NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
status integer NOT NULL,
|
||||
period timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( coupon_id, period )
|
||||
);
|
||||
CREATE TABLE graceful_exit_progress (
|
||||
node_id bytea NOT NULL,
|
||||
bytes_transferred bigint NOT NULL,
|
||||
pieces_transferred bigint NOT NULL DEFAULT 0,
|
||||
pieces_failed bigint NOT NULL DEFAULT 0,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE graceful_exit_transfer_queue (
|
||||
node_id bytea NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
piece_num integer NOT NULL,
|
||||
root_piece_id bytea,
|
||||
durability_ratio double precision NOT NULL,
|
||||
queued_at timestamp with time zone NOT NULL,
|
||||
requested_at timestamp with time zone,
|
||||
last_failed_at timestamp with time zone,
|
||||
last_failed_code integer,
|
||||
failed_count integer,
|
||||
finished_at timestamp with time zone,
|
||||
order_limit_send_count integer NOT NULL DEFAULT 0,
|
||||
PRIMARY KEY ( node_id, path, piece_num )
|
||||
);
|
||||
CREATE TABLE injuredsegments (
|
||||
path bytea NOT NULL,
|
||||
data bytea NOT NULL,
|
||||
attempted timestamp with time zone,
|
||||
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
segment_health double precision NOT NULL DEFAULT 1,
|
||||
PRIMARY KEY ( path )
|
||||
);
|
||||
CREATE TABLE irreparabledbs (
|
||||
segmentpath bytea NOT NULL,
|
||||
segmentdetail bytea NOT NULL,
|
||||
pieces_lost_count bigint NOT NULL,
|
||||
seg_damaged_unix_sec bigint NOT NULL,
|
||||
repair_attempt_count bigint NOT NULL,
|
||||
PRIMARY KEY ( segmentpath )
|
||||
);
|
||||
CREATE TABLE nodes (
|
||||
id bytea NOT NULL,
|
||||
address text NOT NULL DEFAULT '',
|
||||
last_net text NOT NULL,
|
||||
last_ip_port text,
|
||||
protocol integer NOT NULL DEFAULT 0,
|
||||
type integer NOT NULL DEFAULT 0,
|
||||
email text NOT NULL,
|
||||
wallet text NOT NULL,
|
||||
free_disk bigint NOT NULL DEFAULT -1,
|
||||
piece_count bigint NOT NULL DEFAULT 0,
|
||||
major bigint NOT NULL DEFAULT 0,
|
||||
minor bigint NOT NULL DEFAULT 0,
|
||||
patch bigint NOT NULL DEFAULT 0,
|
||||
hash text NOT NULL DEFAULT '',
|
||||
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
|
||||
release boolean NOT NULL DEFAULT false,
|
||||
latency_90 bigint NOT NULL DEFAULT 0,
|
||||
audit_success_count bigint NOT NULL DEFAULT 0,
|
||||
total_audit_count bigint NOT NULL DEFAULT 0,
|
||||
vetted_at timestamp with time zone,
|
||||
uptime_success_count bigint NOT NULL DEFAULT 0,
|
||||
total_uptime_count bigint NOT NULL DEFAULT 0,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
|
||||
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
|
||||
contained boolean NOT NULL DEFAULT false,
|
||||
disqualified timestamp with time zone,
|
||||
suspended timestamp with time zone,
|
||||
unknown_audit_suspended timestamp with time zone,
|
||||
offline_suspended timestamp with time zone,
|
||||
under_review timestamp with time zone,
|
||||
online_score double precision NOT NULL DEFAULT 1,
|
||||
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
|
||||
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
|
||||
exit_initiated_at timestamp with time zone,
|
||||
exit_loop_completed_at timestamp with time zone,
|
||||
exit_finished_at timestamp with time zone,
|
||||
exit_success boolean NOT NULL DEFAULT false,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE node_api_versions (
|
||||
id bytea NOT NULL,
|
||||
api_version integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE offers (
|
||||
id serial NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
award_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
|
||||
award_credit_duration_days integer,
|
||||
invitee_credit_duration_days integer,
|
||||
redeemable_cap integer,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
status integer NOT NULL,
|
||||
type integer NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE peer_identities (
|
||||
node_id bytea NOT NULL,
|
||||
leaf_serial_number bytea NOT NULL,
|
||||
chain bytea NOT NULL,
|
||||
updated_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_audits (
|
||||
node_id bytea NOT NULL,
|
||||
piece_id bytea NOT NULL,
|
||||
stripe_index bigint NOT NULL,
|
||||
share_size bigint NOT NULL,
|
||||
expected_share_hash bytea NOT NULL,
|
||||
reverify_count bigint NOT NULL,
|
||||
path bytea NOT NULL,
|
||||
PRIMARY KEY ( node_id )
|
||||
);
|
||||
CREATE TABLE pending_serial_queue (
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
|
||||
);
|
||||
CREATE TABLE projects (
|
||||
id bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
description text NOT NULL,
|
||||
usage_limit bigint,
|
||||
bandwidth_limit bigint,
|
||||
rate_limit integer,
|
||||
max_buckets integer,
|
||||
partner_id bytea,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE project_bandwidth_rollups (
|
||||
project_id bytea NOT NULL,
|
||||
interval_month date NOT NULL,
|
||||
egress_allocated bigint NOT NULL,
|
||||
PRIMARY KEY ( project_id, interval_month )
|
||||
);
|
||||
CREATE TABLE registration_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea,
|
||||
project_limit integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE reported_serials (
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
storage_node_id bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
action integer NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
settled bigint NOT NULL,
|
||||
observed_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
|
||||
);
|
||||
CREATE TABLE reset_password_tokens (
|
||||
secret bytea NOT NULL,
|
||||
owner_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( secret ),
|
||||
UNIQUE ( owner_id )
|
||||
);
|
||||
CREATE TABLE revocations (
|
||||
revoked bytea NOT NULL,
|
||||
api_key_id bytea NOT NULL,
|
||||
PRIMARY KEY ( revoked )
|
||||
);
|
||||
CREATE TABLE serial_numbers (
|
||||
id serial NOT NULL,
|
||||
serial_number bytea NOT NULL,
|
||||
bucket_id bytea NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollup_archives (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
|
||||
storagenode_id bytea NOT NULL,
|
||||
interval_start timestamp with time zone NOT NULL,
|
||||
interval_seconds integer NOT NULL,
|
||||
action integer NOT NULL,
|
||||
allocated bigint DEFAULT 0,
|
||||
settled bigint NOT NULL,
|
||||
PRIMARY KEY ( storagenode_id, interval_start, action )
|
||||
);
|
||||
CREATE TABLE storagenode_payments (
|
||||
id bigserial NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
period text NOT NULL,
|
||||
amount bigint NOT NULL,
|
||||
receipt text,
|
||||
notes text,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE storagenode_paystubs (
|
||||
period text NOT NULL,
|
||||
node_id bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
codes text NOT NULL,
|
||||
usage_at_rest double precision NOT NULL,
|
||||
usage_get bigint NOT NULL,
|
||||
usage_put bigint NOT NULL,
|
||||
usage_get_repair bigint NOT NULL,
|
||||
usage_put_repair bigint NOT NULL,
|
||||
usage_get_audit bigint NOT NULL,
|
||||
comp_at_rest bigint NOT NULL,
|
||||
comp_get bigint NOT NULL,
|
||||
comp_put bigint NOT NULL,
|
||||
comp_get_repair bigint NOT NULL,
|
||||
comp_put_repair bigint NOT NULL,
|
||||
comp_get_audit bigint NOT NULL,
|
||||
surge_percent bigint NOT NULL,
|
||||
held bigint NOT NULL,
|
||||
owed bigint NOT NULL,
|
||||
disposed bigint NOT NULL,
|
||||
paid bigint NOT NULL,
|
||||
PRIMARY KEY ( period, node_id )
|
||||
);
|
||||
CREATE TABLE storagenode_storage_tallies (
|
||||
node_id bytea NOT NULL,
|
||||
interval_end_time timestamp with time zone NOT NULL,
|
||||
data_total double precision NOT NULL,
|
||||
PRIMARY KEY ( interval_end_time, node_id )
|
||||
);
|
||||
CREATE TABLE stripe_customers (
|
||||
user_id bytea NOT NULL,
|
||||
customer_id text NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( user_id ),
|
||||
UNIQUE ( customer_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_invoice_project_records (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL,
|
||||
storage double precision NOT NULL,
|
||||
egress bigint NOT NULL,
|
||||
objects bigint NOT NULL,
|
||||
period_start timestamp with time zone NOT NULL,
|
||||
period_end timestamp with time zone NOT NULL,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( project_id, period_start, period_end )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_tx_conversion_rates (
|
||||
tx_id text NOT NULL,
|
||||
rate bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE users (
|
||||
id bytea NOT NULL,
|
||||
email text NOT NULL,
|
||||
normalized_email text NOT NULL,
|
||||
full_name text NOT NULL,
|
||||
short_name text,
|
||||
password_hash bytea NOT NULL,
|
||||
status integer NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
project_limit integer NOT NULL DEFAULT 0,
|
||||
position text,
|
||||
company_name text,
|
||||
company_size integer,
|
||||
working_on text,
|
||||
is_professional boolean NOT NULL DEFAULT false,
|
||||
PRIMARY KEY ( id )
|
||||
);
|
||||
CREATE TABLE value_attributions (
|
||||
project_id bytea NOT NULL,
|
||||
bucket_name bytea NOT NULL,
|
||||
partner_id bytea NOT NULL,
|
||||
last_updated timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( project_id, bucket_name )
|
||||
);
|
||||
CREATE TABLE api_keys (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
head bytea NOT NULL,
|
||||
name text NOT NULL,
|
||||
secret bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( head ),
|
||||
UNIQUE ( name, project_id )
|
||||
);
|
||||
CREATE TABLE bucket_metainfos (
|
||||
id bytea NOT NULL,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ),
|
||||
name bytea NOT NULL,
|
||||
partner_id bytea,
|
||||
path_cipher integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
default_segment_size integer NOT NULL,
|
||||
default_encryption_cipher_suite integer NOT NULL,
|
||||
default_encryption_block_size integer NOT NULL,
|
||||
default_redundancy_algorithm integer NOT NULL,
|
||||
default_redundancy_share_size integer NOT NULL,
|
||||
default_redundancy_required_shares integer NOT NULL,
|
||||
default_redundancy_repair_shares integer NOT NULL,
|
||||
default_redundancy_optimal_shares integer NOT NULL,
|
||||
default_redundancy_total_shares integer NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( project_id, name )
|
||||
);
|
||||
CREATE TABLE project_members (
|
||||
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( member_id, project_id )
|
||||
);
|
||||
CREATE TABLE stripecoinpayments_apply_balance_intents (
|
||||
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
|
||||
state integer NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( tx_id )
|
||||
);
|
||||
CREATE TABLE used_serials (
|
||||
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
|
||||
storage_node_id bytea NOT NULL,
|
||||
PRIMARY KEY ( serial_number_id, storage_node_id )
|
||||
);
|
||||
CREATE TABLE user_credits (
|
||||
id serial NOT NULL,
|
||||
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
|
||||
offer_id integer NOT NULL REFERENCES offers( id ),
|
||||
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
|
||||
type text NOT NULL,
|
||||
credits_earned_in_cents integer NOT NULL,
|
||||
credits_used_in_cents integer NOT NULL,
|
||||
expires_at timestamp with time zone NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
PRIMARY KEY ( id ),
|
||||
UNIQUE ( id, offer_id )
|
||||
);
|
||||
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
|
||||
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
|
||||
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
|
||||
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
|
||||
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
|
||||
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
|
||||
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
|
||||
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
|
||||
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
|
||||
CREATE INDEX node_last_ip ON nodes ( last_net );
|
||||
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
|
||||
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
|
||||
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
|
||||
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
|
||||
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
|
||||
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
|
||||
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
|
||||
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
|
||||
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
|
||||
|
||||
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
|
||||
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
|
||||
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
|
||||
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
|
||||
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
|
||||
|
||||
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
|
||||
|
||||
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
|
||||
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
|
||||
|
||||
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
|
||||
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
|
||||
|
||||
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
|
||||
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
|
||||
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
|
||||
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
|
||||
|
||||
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
|
||||
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
|
||||
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);
-- NEW DATA --
INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
609 satellite/satellitedb/testdata/postgres.v143.sql vendored Normal file
@ -0,0 +1,609 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
    node_id bytea NOT NULL,
    start_time timestamp with time zone NOT NULL,
    put_total bigint NOT NULL,
    get_total bigint NOT NULL,
    get_audit_total bigint NOT NULL,
    get_repair_total bigint NOT NULL,
    put_repair_total bigint NOT NULL,
    at_rest_total double precision NOT NULL,
    PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
    name text NOT NULL,
    value timestamp with time zone NOT NULL,
    PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
    node_id bytea NOT NULL,
    history bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_bandwidth_rollup_archives (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    inline bigint NOT NULL,
    remote bigint NOT NULL,
    remote_segments_count integer NOT NULL,
    inline_segments_count integer NOT NULL,
    object_count integer NOT NULL,
    metadata_size bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
    id text NOT NULL,
    user_id bytea NOT NULL,
    address text NOT NULL,
    amount bytea NOT NULL,
    received bytea NOT NULL,
    status integer NOT NULL,
    key text NOT NULL,
    timeout integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
    storage_node_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
    id bytea NOT NULL,
    user_id bytea NOT NULL,
    amount bigint NOT NULL,
    description text NOT NULL,
    type integer NOT NULL,
    status integer NOT NULL,
    duration bigint NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
    coupon_id bytea NOT NULL,
    amount bigint NOT NULL,
    status integer NOT NULL,
    period timestamp with time zone NOT NULL,
    PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
    node_id bytea NOT NULL,
    bytes_transferred bigint NOT NULL,
    pieces_transferred bigint NOT NULL DEFAULT 0,
    pieces_failed bigint NOT NULL DEFAULT 0,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
    node_id bytea NOT NULL,
    path bytea NOT NULL,
    piece_num integer NOT NULL,
    root_piece_id bytea,
    durability_ratio double precision NOT NULL,
    queued_at timestamp with time zone NOT NULL,
    requested_at timestamp with time zone,
    last_failed_at timestamp with time zone,
    last_failed_code integer,
    failed_count integer,
    finished_at timestamp with time zone,
    order_limit_send_count integer NOT NULL DEFAULT 0,
    PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
    path bytea NOT NULL,
    data bytea NOT NULL,
    attempted timestamp with time zone,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    segment_health double precision NOT NULL DEFAULT 1,
    PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
    segmentpath bytea NOT NULL,
    segmentdetail bytea NOT NULL,
    pieces_lost_count bigint NOT NULL,
    seg_damaged_unix_sec bigint NOT NULL,
    repair_attempt_count bigint NOT NULL,
    PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
    id bytea NOT NULL,
    address text NOT NULL DEFAULT '',
    last_net text NOT NULL,
    last_ip_port text,
    protocol integer NOT NULL DEFAULT 0,
    type integer NOT NULL DEFAULT 0,
    email text NOT NULL,
    wallet text NOT NULL,
    free_disk bigint NOT NULL DEFAULT -1,
    piece_count bigint NOT NULL DEFAULT 0,
    major bigint NOT NULL DEFAULT 0,
    minor bigint NOT NULL DEFAULT 0,
    patch bigint NOT NULL DEFAULT 0,
    hash text NOT NULL DEFAULT '',
    timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
    release boolean NOT NULL DEFAULT false,
    latency_90 bigint NOT NULL DEFAULT 0,
    audit_success_count bigint NOT NULL DEFAULT 0,
    total_audit_count bigint NOT NULL DEFAULT 0,
    vetted_at timestamp with time zone,
    uptime_success_count bigint NOT NULL DEFAULT 0,
    total_uptime_count bigint NOT NULL DEFAULT 0,
    created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
    last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
    contained boolean NOT NULL DEFAULT false,
    disqualified timestamp with time zone,
    suspended timestamp with time zone,
    unknown_audit_suspended timestamp with time zone,
    offline_suspended timestamp with time zone,
    under_review timestamp with time zone,
    online_score double precision NOT NULL DEFAULT 1,
    audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    audit_reputation_beta double precision NOT NULL DEFAULT 0,
    unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
    uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
    uptime_reputation_beta double precision NOT NULL DEFAULT 0,
    exit_initiated_at timestamp with time zone,
    exit_loop_completed_at timestamp with time zone,
    exit_finished_at timestamp with time zone,
    exit_success boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
    id bytea NOT NULL,
    api_version integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE offers (
    id serial NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    award_credit_in_cents integer NOT NULL DEFAULT 0,
    invitee_credit_in_cents integer NOT NULL DEFAULT 0,
    award_credit_duration_days integer,
    invitee_credit_duration_days integer,
    redeemable_cap integer,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    status integer NOT NULL,
    type integer NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
    node_id bytea NOT NULL,
    leaf_serial_number bytea NOT NULL,
    chain bytea NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
    node_id bytea NOT NULL,
    piece_id bytea NOT NULL,
    stripe_index bigint NOT NULL,
    share_size bigint NOT NULL,
    expected_share_hash bytea NOT NULL,
    reverify_count bigint NOT NULL,
    path bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
    storage_node_id bytea NOT NULL,
    bucket_id bytea NOT NULL,
    serial_number bytea NOT NULL,
    action integer NOT NULL,
    settled bigint NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
    id bytea NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    usage_limit bigint,
    bandwidth_limit bigint,
    rate_limit integer,
    max_buckets integer,
    partner_id bytea,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
    project_id bytea NOT NULL,
    interval_month date NOT NULL,
    egress_allocated bigint NOT NULL,
    PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
    secret bytea NOT NULL,
    owner_id bytea,
    project_limit integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
    expires_at timestamp with time zone NOT NULL,
    storage_node_id bytea NOT NULL,
    bucket_id bytea NOT NULL,
    action integer NOT NULL,
    serial_number bytea NOT NULL,
    settled bigint NOT NULL,
    observed_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
    secret bytea NOT NULL,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE revocations (
    revoked bytea NOT NULL,
    api_key_id bytea NOT NULL,
    PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
    id serial NOT NULL,
    serial_number bytea NOT NULL,
    bucket_id bytea NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollup_archives (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
    id bigserial NOT NULL,
    created_at timestamp with time zone NOT NULL,
    node_id bytea NOT NULL,
    period text NOT NULL,
    amount bigint NOT NULL,
    receipt text,
    notes text,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
    period text NOT NULL,
    node_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    codes text NOT NULL,
    usage_at_rest double precision NOT NULL,
    usage_get bigint NOT NULL,
    usage_put bigint NOT NULL,
    usage_get_repair bigint NOT NULL,
    usage_put_repair bigint NOT NULL,
    usage_get_audit bigint NOT NULL,
    comp_at_rest bigint NOT NULL,
    comp_get bigint NOT NULL,
    comp_put bigint NOT NULL,
    comp_get_repair bigint NOT NULL,
    comp_put_repair bigint NOT NULL,
    comp_get_audit bigint NOT NULL,
    surge_percent bigint NOT NULL,
    held bigint NOT NULL,
    owed bigint NOT NULL,
    disposed bigint NOT NULL,
    paid bigint NOT NULL,
    distributed bigint NOT NULL,
    PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
    node_id bytea NOT NULL,
    interval_end_time timestamp with time zone NOT NULL,
    data_total double precision NOT NULL,
    PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
    user_id bytea NOT NULL,
    customer_id text NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( user_id ),
    UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
    id bytea NOT NULL,
    project_id bytea NOT NULL,
    storage double precision NOT NULL,
    egress bigint NOT NULL,
    objects bigint NOT NULL,
    period_start timestamp with time zone NOT NULL,
    period_end timestamp with time zone NOT NULL,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
    tx_id text NOT NULL,
    rate bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
    id bytea NOT NULL,
    email text NOT NULL,
    normalized_email text NOT NULL,
    full_name text NOT NULL,
    short_name text,
    password_hash bytea NOT NULL,
    status integer NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    project_limit integer NOT NULL DEFAULT 0,
    position text,
    company_name text,
    company_size integer,
    working_on text,
    is_professional boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
    project_id bytea NOT NULL,
    bucket_name bytea NOT NULL,
    partner_id bytea NOT NULL,
    last_updated timestamp with time zone NOT NULL,
    PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    head bytea NOT NULL,
    name text NOT NULL,
    secret bytea NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( head ),
    UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ),
    name bytea NOT NULL,
    partner_id bytea,
    path_cipher integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    default_segment_size integer NOT NULL,
    default_encryption_cipher_suite integer NOT NULL,
    default_encryption_block_size integer NOT NULL,
    default_redundancy_algorithm integer NOT NULL,
    default_redundancy_share_size integer NOT NULL,
    default_redundancy_required_shares integer NOT NULL,
    default_redundancy_repair_shares integer NOT NULL,
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
    member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
    tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
    serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
    storage_node_id bytea NOT NULL,
    PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
    id serial NOT NULL,
    user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    offer_id integer NOT NULL REFERENCES offers( id ),
    referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
    type text NOT NULL,
    credits_earned_in_cents integer NOT NULL,
    credits_used_in_cents integer NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
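The fixture rows below exercise the schema above. As a hedged aside rather than anything taken from this file, the two bucket_bandwidth_rollups_archive_* indexes are shaped for scans by project and by interval; a query of the kind they would serve might look like this (the action code is an assumption used only for illustration):

-- Illustrative only: settled egress per project from archived rollups,
-- matching the ( action, interval_start, project_id ) index above.
SELECT project_id, SUM(settled) AS settled_egress
FROM bucket_bandwidth_rollup_archives
WHERE action = 2 -- assumed to be the GET action code, for illustration
  AND interval_start >= '2019-03-01 00:00:00+00'
GROUP BY project_id;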
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);
INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 117);
INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117);
-- OLD DATA --
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117);
INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117);
557
satellite/satellitedb/testdata/postgres.v144.sql
vendored
Normal file
@ -0,0 +1,557 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
    node_id bytea NOT NULL,
    start_time timestamp with time zone NOT NULL,
    put_total bigint NOT NULL,
    get_total bigint NOT NULL,
    get_audit_total bigint NOT NULL,
    get_repair_total bigint NOT NULL,
    put_repair_total bigint NOT NULL,
    at_rest_total double precision NOT NULL,
    PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
    name text NOT NULL,
    value timestamp with time zone NOT NULL,
    PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
    node_id bytea NOT NULL,
    history bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_bandwidth_rollup_archives (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    inline bigint NOT NULL,
    allocated bigint NOT NULL,
    settled bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
    bucket_name bytea NOT NULL,
    project_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    inline bigint NOT NULL,
    remote bigint NOT NULL,
    remote_segments_count integer NOT NULL,
    inline_segments_count integer NOT NULL,
    object_count integer NOT NULL,
    metadata_size bigint NOT NULL,
    PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
    id text NOT NULL,
    user_id bytea NOT NULL,
    address text NOT NULL,
    amount bytea NOT NULL,
    received bytea NOT NULL,
    status integer NOT NULL,
    key text NOT NULL,
    timeout integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE coupons (
    id bytea NOT NULL,
    user_id bytea NOT NULL,
    amount bigint NOT NULL,
    description text NOT NULL,
    type integer NOT NULL,
    status integer NOT NULL,
    duration bigint NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
    coupon_id bytea NOT NULL,
    amount bigint NOT NULL,
    status integer NOT NULL,
    period timestamp with time zone NOT NULL,
    PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
    node_id bytea NOT NULL,
    bytes_transferred bigint NOT NULL,
    pieces_transferred bigint NOT NULL DEFAULT 0,
    pieces_failed bigint NOT NULL DEFAULT 0,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
    node_id bytea NOT NULL,
    path bytea NOT NULL,
    piece_num integer NOT NULL,
    root_piece_id bytea,
    durability_ratio double precision NOT NULL,
    queued_at timestamp with time zone NOT NULL,
    requested_at timestamp with time zone,
    last_failed_at timestamp with time zone,
    last_failed_code integer,
    failed_count integer,
    finished_at timestamp with time zone,
    order_limit_send_count integer NOT NULL DEFAULT 0,
    PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
    path bytea NOT NULL,
    data bytea NOT NULL,
    attempted timestamp with time zone,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    segment_health double precision NOT NULL DEFAULT 1,
    PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
    segmentpath bytea NOT NULL,
    segmentdetail bytea NOT NULL,
    pieces_lost_count bigint NOT NULL,
    seg_damaged_unix_sec bigint NOT NULL,
    repair_attempt_count bigint NOT NULL,
    PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
    id bytea NOT NULL,
    address text NOT NULL DEFAULT '',
    last_net text NOT NULL,
    last_ip_port text,
    protocol integer NOT NULL DEFAULT 0,
    type integer NOT NULL DEFAULT 0,
    email text NOT NULL,
    wallet text NOT NULL,
    free_disk bigint NOT NULL DEFAULT -1,
    piece_count bigint NOT NULL DEFAULT 0,
    major bigint NOT NULL DEFAULT 0,
    minor bigint NOT NULL DEFAULT 0,
    patch bigint NOT NULL DEFAULT 0,
    hash text NOT NULL DEFAULT '',
    timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
    release boolean NOT NULL DEFAULT false,
    latency_90 bigint NOT NULL DEFAULT 0,
    audit_success_count bigint NOT NULL DEFAULT 0,
    total_audit_count bigint NOT NULL DEFAULT 0,
    vetted_at timestamp with time zone,
    uptime_success_count bigint NOT NULL DEFAULT 0,
    total_uptime_count bigint NOT NULL DEFAULT 0,
    created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
    last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
    last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
    contained boolean NOT NULL DEFAULT false,
    disqualified timestamp with time zone,
    suspended timestamp with time zone,
    unknown_audit_suspended timestamp with time zone,
    offline_suspended timestamp with time zone,
    under_review timestamp with time zone,
    online_score double precision NOT NULL DEFAULT 1,
    audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    audit_reputation_beta double precision NOT NULL DEFAULT 0,
    unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
    unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
    uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
    uptime_reputation_beta double precision NOT NULL DEFAULT 0,
    exit_initiated_at timestamp with time zone,
    exit_loop_completed_at timestamp with time zone,
    exit_finished_at timestamp with time zone,
    exit_success boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
    id bytea NOT NULL,
    api_version integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE offers (
    id serial NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    award_credit_in_cents integer NOT NULL DEFAULT 0,
    invitee_credit_in_cents integer NOT NULL DEFAULT 0,
    award_credit_duration_days integer,
    invitee_credit_duration_days integer,
    redeemable_cap integer,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    status integer NOT NULL,
    type integer NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
    node_id bytea NOT NULL,
    leaf_serial_number bytea NOT NULL,
    chain bytea NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
    node_id bytea NOT NULL,
    piece_id bytea NOT NULL,
    stripe_index bigint NOT NULL,
    share_size bigint NOT NULL,
    expected_share_hash bytea NOT NULL,
    reverify_count bigint NOT NULL,
    path bytea NOT NULL,
    PRIMARY KEY ( node_id )
);
CREATE TABLE projects (
    id bytea NOT NULL,
    name text NOT NULL,
    description text NOT NULL,
    usage_limit bigint,
    bandwidth_limit bigint,
    rate_limit integer,
    max_buckets integer,
    partner_id bytea,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
    project_id bytea NOT NULL,
    interval_month date NOT NULL,
    egress_allocated bigint NOT NULL,
    PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
    secret bytea NOT NULL,
    owner_id bytea,
    project_limit integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE reset_password_tokens (
    secret bytea NOT NULL,
    owner_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( secret ),
    UNIQUE ( owner_id )
);
CREATE TABLE revocations (
    revoked bytea NOT NULL,
    api_key_id bytea NOT NULL,
    PRIMARY KEY ( revoked )
);
CREATE TABLE storagenode_bandwidth_rollups (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollup_archives (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
    storagenode_id bytea NOT NULL,
    interval_start timestamp with time zone NOT NULL,
    interval_seconds integer NOT NULL,
    action integer NOT NULL,
    allocated bigint DEFAULT 0,
    settled bigint NOT NULL,
    PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
    id bigserial NOT NULL,
    created_at timestamp with time zone NOT NULL,
    node_id bytea NOT NULL,
    period text NOT NULL,
    amount bigint NOT NULL,
    receipt text,
    notes text,
    PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
    period text NOT NULL,
    node_id bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    codes text NOT NULL,
    usage_at_rest double precision NOT NULL,
    usage_get bigint NOT NULL,
    usage_put bigint NOT NULL,
    usage_get_repair bigint NOT NULL,
    usage_put_repair bigint NOT NULL,
    usage_get_audit bigint NOT NULL,
    comp_at_rest bigint NOT NULL,
    comp_get bigint NOT NULL,
    comp_put bigint NOT NULL,
    comp_get_repair bigint NOT NULL,
    comp_put_repair bigint NOT NULL,
    comp_get_audit bigint NOT NULL,
    surge_percent bigint NOT NULL,
    held bigint NOT NULL,
    owed bigint NOT NULL,
    disposed bigint NOT NULL,
    paid bigint NOT NULL,
    distributed bigint NOT NULL,
    PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
    node_id bytea NOT NULL,
    interval_end_time timestamp with time zone NOT NULL,
    data_total double precision NOT NULL,
    PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
    user_id bytea NOT NULL,
    customer_id text NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( user_id ),
    UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
    id bytea NOT NULL,
    project_id bytea NOT NULL,
    storage double precision NOT NULL,
    egress bigint NOT NULL,
    objects bigint NOT NULL,
    period_start timestamp with time zone NOT NULL,
    period_end timestamp with time zone NOT NULL,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
    tx_id text NOT NULL,
    rate bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
    id bytea NOT NULL,
    email text NOT NULL,
    normalized_email text NOT NULL,
    full_name text NOT NULL,
    short_name text,
    password_hash bytea NOT NULL,
    status integer NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    project_limit integer NOT NULL DEFAULT 0,
    position text,
    company_name text,
    company_size integer,
    working_on text,
    is_professional boolean NOT NULL DEFAULT false,
    PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
    project_id bytea NOT NULL,
    bucket_name bytea NOT NULL,
    partner_id bytea NOT NULL,
    last_updated timestamp with time zone NOT NULL,
    PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    head bytea NOT NULL,
    name text NOT NULL,
    secret bytea NOT NULL,
    partner_id bytea,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( head ),
    UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
    id bytea NOT NULL,
    project_id bytea NOT NULL REFERENCES projects( id ),
    name bytea NOT NULL,
    partner_id bytea,
    path_cipher integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    default_segment_size integer NOT NULL,
    default_encryption_cipher_suite integer NOT NULL,
    default_encryption_block_size integer NOT NULL,
    default_redundancy_algorithm integer NOT NULL,
    default_redundancy_share_size integer NOT NULL,
    default_redundancy_required_shares integer NOT NULL,
    default_redundancy_repair_shares integer NOT NULL,
    default_redundancy_optimal_shares integer NOT NULL,
    default_redundancy_total_shares integer NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
    member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
    tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
    state integer NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( tx_id )
);
CREATE TABLE user_credits (
    id serial NOT NULL,
    user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
    offer_id integer NOT NULL REFERENCES offers( id ),
    referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
    type text NOT NULL,
    credits_earned_in_cents integer NOT NULL,
    credits_used_in_cents integer NOT NULL,
    expires_at timestamp with time zone NOT NULL,
    created_at timestamp with time zone NOT NULL,
    PRIMARY KEY ( id ),
    UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n\\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);
INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 117);
INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117);
-- NEW DATA --
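The seeded rows above include "storagenode_paystubs" entries whose last two columns are "paid" and "distributed". As a minimal sketch (not part of the repository) of inspecting that test data with database/sql: the Postgres driver import and the DSN are assumptions, only the table and column names come from the statements above.

// Sketch only: lists paid vs distributed for the seeded paystub rows.
package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/lib/pq" // assumed Postgres driver
)

func main() {
    // Hypothetical DSN; point it at the database the testdata was loaded into.
    db, err := sql.Open("postgres", "postgres://localhost/testdb?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    rows, err := db.Query(`SELECT period, paid, distributed FROM storagenode_paystubs`)
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    for rows.Next() {
        var period string
        var paid, distributed int64
        if err := rows.Scan(&period, &paid, &distributed); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s: paid=%d distributed=%d\n", period, paid, distributed)
    }
    if err := rows.Err(); err != nil {
        log.Fatal(err)
    }
}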
@@ -130,6 +130,7 @@ func convertPaystub(paystub Paystub) (*pb.GetHeldAmountResponse, error) {
        Owed:        paystub.Owed,
        Disposed:    paystub.Disposed,
        Paid:        paystub.Paid,
        Distributed: paystub.Distributed,
    }, err
}
@@ -62,6 +62,7 @@ type Paystub struct {
    Owed        int64 `json:"owed"`
    Disposed    int64 `json:"disposed"`
    Paid        int64 `json:"paid"`
    Distributed int64 `json:"distributed"`
}

// Payment is an entity that holds payment to storagenode operator parameters.
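With Distributed added to the Paystub struct above, JSON consumers see one extra key. A minimal sketch (not repository code, field set abbreviated to the tags shown above) of the resulting wire shape:

// Sketch only: a trimmed paystub mirroring the json tags above.
package main

import (
    "encoding/json"
    "fmt"
)

type paystub struct {
    Owed        int64 `json:"owed"`
    Disposed    int64 `json:"disposed"`
    Paid        int64 `json:"paid"`
    Distributed int64 `json:"distributed"`
}

func main() {
    out, _ := json.Marshal(paystub{Owed: 15, Disposed: 16, Paid: 17, Distributed: 18})
    fmt.Println(string(out)) // {"owed":15,"disposed":16,"paid":17,"distributed":18}
}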
@@ -43,6 +43,7 @@ func TestPayoutDB(t *testing.T) {
        Owed:        15,
        Disposed:    16,
        Paid:        17,
        Distributed: 18,
    }

    paystub2 := snopayouts.Paystub{
@@ -66,6 +67,7 @@ func TestPayoutDB(t *testing.T) {
        Owed:        18,
        Disposed:    19,
        Paid:        20,
        Distributed: 21,
    }

    paystub3 := snopayouts.Paystub{
@@ -89,6 +91,7 @@ func TestPayoutDB(t *testing.T) {
        Owed:        24,
        Disposed:    25,
        Paid:        26,
        Distributed: 27,
    }

    {
scripts/testdata/satellite-config.yaml.lock (vendored, 23 changed lines)
@@ -421,15 +421,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# how many concurrent orders to process at once. zero is unlimited
# orders.orders-semaphore-size: 2

# how many records to read in a single transaction when calculating billable bandwidth
# orders.reported-rollups-read-batch-size: 1000

# how many orders to batch per transaction
# orders.settlement-batch-size: 250

# rollout phase for the windowed endpoint
# orders.window-endpoint-rollout-phase: phase3

# The length of time to give suspended SNOs to diagnose and fix issues causing downtime. Afterwards, they will have one tracking period to reach the minimum online score before disqualification
# overlay.audit-history.grace-period: 168h0m0s
@@ -604,11 +595,17 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# time limit for an entire repair job, from queue pop to upload completion
# repairer.total-timeout: 45m0s

# how often to flush the reported serial rollups to the database
# reported-rollup.interval: 5m0s
# age at which a rollup is archived
# rollup-archive.archive-age: 2160h0m0s

# default queue batch size
# reported-rollup.queue-batch-size: 10000
# number of records to delete per delete execution. Used only for crdb which is slow without limit.
# rollup-archive.batch-size: 500

# whether or not the rollup archive is enabled.
# rollup-archive.enabled: true

# how frequently rollup archiver should run
# rollup-archive.interval: 24h0m0s

# option for deleting tallies after they are rolled up
# rollup.delete-tallies: true
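The reported-rollup settings above are superseded by the rollup-archive options. An operator who wants non-default archiving behavior would set them in the satellite config, in the same format as the lock file; the values below are illustrative only, not recommendations from this change:

rollup-archive.enabled: true
rollup-archive.interval: 12h0m0s
rollup-archive.archive-age: 1440h0m0s
rollup-archive.batch-size: 500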
@@ -64,9 +64,6 @@ func (chore *Chore) Run(ctx context.Context) (err error) {
    for _, satellite := range geSatellites {
        mon.Meter("satellite_gracefulexit_request").Mark(1) //mon:locked
        satellite := satellite
        if satellite.FinishedAt != nil {
            continue
        }

        worker := NewWorker(chore.log, chore.service, chore.transferService, chore.dialer, satellite.NodeURL, chore.config)
        if _, ok := chore.exitingMap.LoadOrStore(satellite.SatelliteID, worker); ok {
@@ -92,9 +92,12 @@ func (c *service) ListPendingExits(ctx context.Context) (_ []ExitingSatellite, e
    }
    exitingSatellites := make([]ExitingSatellite, 0, len(exitProgress))
    for _, sat := range exitProgress {
        if sat.FinishedAt != nil {
            continue
        }
        nodeURL, err := c.trust.GetNodeURL(ctx, sat.SatelliteID)
        if err != nil {
            c.log.Error("failed to get satellite address", zap.Stringer("satellite-id", sat.SatelliteID), zap.Error(err))
            c.log.Error("failed to get satellite address", zap.Stringer("Satellite ID", sat.SatelliteID), zap.Error(err))
            continue
        }
        exitingSatellites = append(exitingSatellites, ExitingSatellite{ExitProgress: sat, NodeURL: nodeURL})
@@ -232,6 +232,8 @@ func (service *Service) sendOrdersFromDB(ctx context.Context) (hasOrders bool) {
}

// Settle uploads orders to the satellite.
//
// DEPRECATED server always return an error if this endpoint is called.
func (service *Service) Settle(ctx context.Context, satelliteID storj.NodeID, orders []*ordersfile.Info, requests chan ArchiveRequest) {
    log := service.log.Named(satelliteID.String())
    err := service.settle(ctx, log, satelliteID, orders, requests)
@@ -95,6 +95,7 @@ func (endpoint *Endpoint) GetPaystub(ctx context.Context, satelliteID storj.Node
        Owed:        resp.Owed,
        Disposed:    resp.Disposed,
        Paid:        resp.Paid,
        Distributed: resp.Distributed,
    }, nil
}

@@ -138,6 +139,7 @@ func (endpoint *Endpoint) GetAllPaystubs(ctx context.Context, satelliteID storj.
        Owed:        resp.Paystub[i].Owed,
        Disposed:    resp.Paystub[i].Disposed,
        Paid:        resp.Paystub[i].Paid,
        Distributed: resp.Paystub[i].Distributed,
    }

    payStubs = append(payStubs, paystub)
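Both endpoints now pass Distributed through to callers alongside Paid. One plausible use on the consuming side (not confirmed by this diff) is deriving how much of the accounted payout has not yet been transferred; a minimal sketch with an invented helper name:

package example

// undistributed is a derived helper, not part of this change: the difference
// between what has been accounted as paid and what has actually been sent out.
func undistributed(paid, distributed int64) int64 {
    if d := paid - distributed; d > 0 {
        return d
    }
    return 0
}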
@@ -64,6 +64,7 @@ type PayStub struct {
    Owed        int64 `json:"owed"`
    Disposed    int64 `json:"disposed"`
    Paid        int64 `json:"paid"`
    Distributed int64 `json:"distributed"`
}

// HoldForPeriod is node's held amount for period.
@@ -1893,6 +1893,14 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration {
                    return nil
                }),
            },
            {
                DB:          &db.payoutDB.DB,
                Description: "Add distributed field to paystubs table",
                Version:     49,
                Action: migrate.SQL{
                    `ALTER TABLE paystubs ADD COLUMN distributed bigint`,
                },
            },
        },
    }
}
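Because the migration adds the column without NOT NULL, paystub rows written before version 49 read back as NULL for distributed. A minimal sketch, using database/sql directly rather than the repository's helpers (the function name is hypothetical), of scanning that defensively:

// Sketch only: reads the new nullable "distributed" column, treating
// pre-migration NULLs as 0.
package example

import (
    "context"
    "database/sql"
)

func distributedAmount(ctx context.Context, db *sql.DB, satelliteID []byte, period string) (int64, error) {
    var distributed sql.NullInt64
    err := db.QueryRowContext(ctx,
        `SELECT distributed FROM paystubs WHERE satellite_id = ? AND period = ?`,
        satelliteID, period,
    ).Scan(&distributed)
    if err != nil {
        return 0, err
    }
    if !distributed.Valid {
        return 0, nil
    }
    return distributed.Int64, nil
}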
@@ -53,8 +53,9 @@ func (db *payoutDB) StorePayStub(ctx context.Context, paystub payouts.PayStub) (
            held,
            owed,
            disposed,
            paid
        ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`
            paid,
            distributed
        ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`

    _, err = db.ExecContext(ctx, query,
        paystub.Period,
@@ -78,6 +79,7 @@ func (db *payoutDB) StorePayStub(ctx context.Context, paystub payouts.PayStub) (
        paystub.Owed,
        paystub.Disposed,
        paystub.Paid,
        paystub.Distributed,
    )

    return ErrPayout.Wrap(err)
@@ -111,7 +113,8 @@ func (db *payoutDB) GetPayStub(ctx context.Context, satelliteID storj.NodeID, pe
            held,
            owed,
            disposed,
            paid
            paid,
            distributed
        FROM paystubs WHERE satellite_id = ? AND period = ?`,
        satelliteID, period,
    )
@@ -136,6 +139,7 @@ func (db *payoutDB) GetPayStub(ctx context.Context, satelliteID storj.NodeID, pe
        &result.Owed,
        &result.Disposed,
        &result.Paid,
        &result.Distributed,
    )
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
@@ -171,7 +175,8 @@ func (db *payoutDB) AllPayStubs(ctx context.Context, period string) (_ []payouts
            held,
            owed,
            disposed,
            paid
            paid,
            distributed
        FROM paystubs WHERE period = ?`

    rows, err := db.QueryContext(ctx, query, period)
@@ -206,6 +211,7 @@ func (db *payoutDB) AllPayStubs(ctx context.Context, period string) (_ []payouts
        &paystub.Owed,
        &paystub.Disposed,
        &paystub.Paid,
        &paystub.Distributed,
    )
    if err != nil {
        return nil, ErrPayout.Wrap(err)
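After this change the INSERT lists 22 columns and 22 placeholders. A tiny test-style sketch (not from the repository; the column ordering is taken from the paystubs schema shown further down, not from the repo's actual query) that guards against the two drifting apart:

// Sketch only: placeholder count must match the column count.
package example

import (
    "strings"
    "testing"
)

func TestStorePayStubPlaceholders(t *testing.T) {
    const columns = `period, satellite_id, created_at, codes,
        usage_at_rest, usage_get, usage_put, usage_get_repair, usage_put_repair, usage_get_audit,
        comp_at_rest, comp_get, comp_put, comp_get_repair, comp_put_repair, comp_get_audit,
        surge_percent, held, owed, disposed, paid, distributed`
    const values = `?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?`

    if got, want := strings.Count(values, "?"), strings.Count(columns, ",")+1; got != want {
        t.Fatalf("placeholders = %d, columns = %d", got, want)
    }
}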
@@ -159,6 +159,11 @@ func Schema() map[string]*dbschema.Schema {
                Type:       "bigint",
                IsNullable: false,
            },
            &dbschema.Column{
                Name:       "distributed",
                Type:       "bigint",
                IsNullable: true,
            },
            &dbschema.Column{
                Name:       "held",
                Type:       "bigint",
@@ -719,3 +724,4 @@ func Schema() map[string]*dbschema.Schema {
        "used_serial": &dbschema.Schema{},
    }
}
@@ -63,6 +63,7 @@ var States = MultiDBStates{
        &v46,
        &v47,
        &v48,
        &v49,
    },
}
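Each schema version registered in States needs a matching MultiDBState in testdata. As a hypothetical illustration only (v50 does not exist in this change), a later bump would follow the same pattern as v49 below, reusing the previous version's states for untouched databases and overriding only the one that changed:

// Hypothetical example, not part of this commit.
package testdata

import "storj.io/storj/storagenode/storagenodedb"

var v50 = MultiDBState{
    Version: 50,
    DBStates: DBStates{
        // Every unchanged database would point at v49's state, e.g.
        // storagenodedb.BandwidthDBName: v49.DBStates[storagenodedb.BandwidthDBName],
        storagenodedb.HeldAmountDBName: &DBState{
            SQL: `-- expected schema after the hypothetical v50 migration`,
        },
    },
}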
storagenode/storagenodedb/testdata/v49.go (vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package testdata

import "storj.io/storj/storagenode/storagenodedb"

var v49 = MultiDBState{
    Version: 49,
    DBStates: DBStates{
        storagenodedb.UsedSerialsDBName:     v47.DBStates[storagenodedb.UsedSerialsDBName],
        storagenodedb.StorageUsageDBName:    v47.DBStates[storagenodedb.StorageUsageDBName],
        storagenodedb.ReputationDBName:      v48.DBStates[storagenodedb.ReputationDBName],
        storagenodedb.PieceSpaceUsedDBName:  v47.DBStates[storagenodedb.PieceSpaceUsedDBName],
        storagenodedb.PieceInfoDBName:       v47.DBStates[storagenodedb.PieceInfoDBName],
        storagenodedb.PieceExpirationDBName: v47.DBStates[storagenodedb.PieceExpirationDBName],
        storagenodedb.OrdersDBName:          v47.DBStates[storagenodedb.OrdersDBName],
        storagenodedb.BandwidthDBName:       v47.DBStates[storagenodedb.BandwidthDBName],
        storagenodedb.SatellitesDBName:      v47.DBStates[storagenodedb.SatellitesDBName],
        storagenodedb.DeprecatedInfoDBName:  v47.DBStates[storagenodedb.DeprecatedInfoDBName],
        storagenodedb.NotificationsDBName:   v47.DBStates[storagenodedb.NotificationsDBName],
        storagenodedb.HeldAmountDBName: &DBState{
            SQL: `
                -- tables to hold payments and paystub data
                CREATE TABLE paystubs (
                    period text NOT NULL,
                    satellite_id bytea NOT NULL,
                    created_at timestamp NOT NULL,
                    codes text NOT NULL,
                    usage_at_rest double precision NOT NULL,
                    usage_get bigint NOT NULL,
                    usage_put bigint NOT NULL,
                    usage_get_repair bigint NOT NULL,
                    usage_put_repair bigint NOT NULL,
                    usage_get_audit bigint NOT NULL,
                    comp_at_rest bigint NOT NULL,
                    comp_get bigint NOT NULL,
                    comp_put bigint NOT NULL,
                    comp_get_repair bigint NOT NULL,
                    comp_put_repair bigint NOT NULL,
                    comp_get_audit bigint NOT NULL,
                    surge_percent bigint NOT NULL,
                    held bigint NOT NULL,
                    owed bigint NOT NULL,
                    disposed bigint NOT NULL,
                    paid bigint NOT NULL,
                    distributed bigint,
                    PRIMARY KEY ( period, satellite_id )
                );
                CREATE TABLE payments (
                    id bigserial NOT NULL,
                    created_at timestamp NOT NULL,
                    satellite_id bytea NOT NULL,
                    period text,
                    amount bigint NOT NULL,
                    receipt text,
                    notes text,
                    PRIMARY KEY ( id )
                );`,
        },
        storagenodedb.PricingDBName: v47.DBStates[storagenodedb.PricingDBName],
        storagenodedb.APIKeysDBName: v47.DBStates[storagenodedb.APIKeysDBName]},
}
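This new testdata state is what the storagenode migration tests compare against: they replay the migrations in order and diff the resulting schema of each database against the expected state for that version. A typical way to exercise it locally is standard Go tooling (the exact package selection below is an assumption taken from the file paths above):

go test ./storagenode/storagenodedb/...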