satellite/accounting: account for old orders that can be submitted in satellite rollup

With the new phase 3 order submission, orders can be added to the
storage and bandwidth rollup tables at timestamps before the most recent
rollup was run. This change shifts the start time of each new rollup
window back by the order expiration period, to account for any unexpired
orders that might have been added since the previous rollup.
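
For a concrete sense of the arithmetic, here is a minimal, self-contained Go
sketch of the shifted window start; the 48-hour expiration and the timestamps
are hypothetical values (the service reads the real expiration from its orders
configuration and the last rollup time from the accounting_timestamps table):

package main

import (
	"fmt"
	"time"
)

func main() {
	// hypothetical values; the service gets these from config and from
	// the accounting_timestamps table respectively
	orderExpiration := 48 * time.Hour
	lastRollup := time.Date(2020, 11, 4, 0, 0, 0, 0, time.UTC)

	// shift the window start back by the order expiration so that orders
	// settled late for an already-rolled-up period are counted again
	if !lastRollup.IsZero() {
		lastRollup = lastRollup.Add(-orderExpiration)
	}
	fmt.Println(lastRollup) // 2020-11-02 00:00:00 +0000 UTC
}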

A satellitedb migration is necessary to allow upserts in the
accounting_rollups table when entries with identical node_ids and
start_times are inserted.
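
Roughly, the regenerated dbx SQL becomes the following (column list taken from
the schema in this change; values abridged to placeholders):

-- PostgreSQL: insert-or-update on the new (node_id, start_time) primary key
INSERT INTO accounting_rollups
    ( node_id, start_time, put_total, get_total, get_audit_total,
      get_repair_total, put_repair_total, at_rest_total )
VALUES ( $1, $2, $3, $4, $5, $6, $7, $8 )
ON CONFLICT ( node_id, start_time ) DO UPDATE SET
    put_total        = EXCLUDED.put_total,
    get_total        = EXCLUDED.get_total,
    get_audit_total  = EXCLUDED.get_audit_total,
    get_repair_total = EXCLUDED.get_repair_total,
    put_repair_total = EXCLUDED.put_repair_total,
    at_rest_total    = EXCLUDED.at_rest_total;

-- CockroachDB: the same operation via its native UPSERT
UPSERT INTO accounting_rollups
    ( node_id, start_time, put_total, get_total, get_audit_total,
      get_repair_total, put_repair_total, at_rest_total )
VALUES ( $1, $2, $3, $4, $5, $6, $7, $8 );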

Change-Id: Ib3022081f4d6be60cfec8430b45867ad3c01da63
Moby von Briesen 2020-11-04 12:24:11 -05:00
parent e19fabc880
commit a8b66dce17
12 changed files with 1008 additions and 348 deletions

View File

@@ -158,16 +158,18 @@ func TestStorageNodeUsage_TwoRollupsInADay(t *testing.T) {
rollups[t1][nodeID] = &accounting.Rollup{
NodeID: nodeID,
AtRestTotal: 1000,
StartTime: t1,
}
rollups[t2][nodeID] = &accounting.Rollup{
NodeID: nodeID,
AtRestTotal: 500,
StartTime: t2,
}
// save rollup
err = accountingDB.SaveRollup(ctx, now.Add(time.Hour*-24), rollups)
require.NoError(t, err)
nodeStorageUsages, err := accountingDB.QueryStorageNodeUsage(ctx, nodeID, time.Time{}, now)
nodeStorageUsages, err := accountingDB.QueryStorageNodeUsage(ctx, nodeID, t1.Add(-24*time.Hour), t2.Add(24*time.Hour))
require.NoError(t, err)
require.NotNil(t, nodeStorageUsages)
require.Equal(t, 1, len(nodeStorageUsages))

View File

@@ -69,6 +69,11 @@ func (r *Service) Rollup(ctx context.Context) (err error) {
if err != nil {
return Error.Wrap(err)
}
// unexpired orders created before the last rollup timestamp could still have been added after that rollup ran
if !lastRollup.IsZero() {
lastRollup = lastRollup.Add(-r.OrderExpiration)
}
rollupStats := make(accounting.RollupStats)
latestTally, err := r.RollupStorage(ctx, lastRollup, rollupStats)
if err != nil {

View File

@@ -17,14 +17,10 @@ import (
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
)
type testData struct {
nodeData map[storj.NodeID]float64
bwTotals map[storj.NodeID][]int64
}
func TestRollupNoDeletes(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 0,
@@ -36,175 +32,328 @@ func TestRollupNoDeletes(t *testing.T) {
},
},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
planet.Satellites[0].Accounting.Rollup.Loop.Pause()
planet.Satellites[0].Accounting.Tally.Loop.Pause()
// In testplanet the setting config.Rollup.DeleteTallies defaults to false.
// That means if we do not delete any old tally data, then we expect that we
// can tally/rollup data from any time in the past.
// To confirm, this test creates 5 days of tally and rollup data, then we check that all
// the data is present in the accounting rollup table and in the storage node storage tally table.
const (
days = 5
atRestAmount = 10
getAmount = 20
putAmount = 30
getAuditAmount = 40
getRepairAmount = 50
putRepairAmount = 60
)
var (
satellitePeer = planet.Satellites[0]
ordersDB = satellitePeer.DB.Orders()
snAccountingDB = satellitePeer.DB.StoragenodeAccounting()
)
satellitePeer.Accounting.Rollup.Loop.Pause()
satellitePeer.Accounting.Tally.Loop.Pause()
// disqualifying nodes is unrelated to this test, but we do it here
// to confirm the disqualification shows up in the accounting CSVRow
dqedNodes, err := dqNodes(ctx, planet)
require.NoError(t, err)
require.NotEmpty(t, dqedNodes)
days := 5
testData := createData(planet, days)
// Set initialTime back by the number of days we want to save
initialTime := time.Now().UTC().AddDate(0, 0, -days)
currentTime := initialTime
// Set timestamp back by the number of days we want to save
timestamp := time.Now().UTC().AddDate(0, 0, -1*days)
start := timestamp
nodeData := map[storj.NodeID]float64{}
bwTotals := make(map[storj.NodeID][]int64)
for _, storageNode := range planet.StorageNodes {
nodeData[storageNode.ID()] = float64(atRestAmount)
storageNodeID := storageNode.ID()
bwTotals[storageNodeID] = []int64{putAmount, getAmount, getAuditAmount, getRepairAmount, putRepairAmount}
}
for i := 0; i < days; i++ {
err := planet.Satellites[0].DB.StoragenodeAccounting().SaveTallies(ctx, timestamp, testData[i].nodeData)
require.NoError(t, err)
err = saveBW(ctx, planet, testData[i].bwTotals, timestamp)
require.NoError(t, err)
// Create 5 days worth of tally and rollup data.
// Add one additional day of data since the rollup service will truncate data from the most recent day.
for i := 0; i < days+1; i++ {
require.NoError(t, snAccountingDB.SaveTallies(ctx, currentTime, nodeData))
require.NoError(t, saveBWPhase3(ctx, ordersDB, bwTotals, currentTime))
err = planet.Satellites[0].Accounting.Rollup.Rollup(ctx)
require.NoError(t, err)
require.NoError(t, satellitePeer.Accounting.Rollup.Rollup(ctx))
// Advance time by 24 hours
timestamp = timestamp.Add(time.Hour * 24)
end := timestamp
currentTime = currentTime.Add(24 * time.Hour)
}
// rollup.RollupRaws cuts off the hr/min/sec before saving; we need to do the same when querying
start = time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, start.Location())
end = time.Date(end.Year(), end.Month(), end.Day(), 0, 0, 0, 0, end.Location())
accountingCSVRows, err := snAccountingDB.QueryPaymentInfo(ctx, initialTime.Add(-24*time.Hour), currentTime.Add(24*time.Hour))
require.NoError(t, err)
assert.Equal(t, len(planet.StorageNodes), len(accountingCSVRows))
rows, err := planet.Satellites[0].DB.StoragenodeAccounting().QueryPaymentInfo(ctx, start, end)
require.NoError(t, err)
if i == 0 { // we need at least two days for rollup to work
assert.Equal(t, 0, len(rows))
continue
}
// the number of rows should be number of nodes
assert.Equal(t, len(planet.StorageNodes), len(rows))
// verify data is correct
for _, r := range rows {
totals := expectedTotals(testData, r.NodeID, i)
assert.Equal(t, int64(totals[0]), r.PutTotal)
assert.Equal(t, int64(totals[1]), r.GetTotal)
assert.Equal(t, int64(totals[2]), r.GetAuditTotal)
assert.Equal(t, int64(totals[3]), r.GetRepairTotal)
assert.Equal(t, totals[4], r.AtRestTotal)
assert.NotEmpty(t, r.Wallet)
if dqedNodes[r.NodeID] {
assert.NotNil(t, r.Disqualified)
} else {
assert.Nil(t, r.Disqualified)
}
// Confirm that all the data saved over the 5 days is summed in the accounting rollup table.
for _, row := range accountingCSVRows {
assert.Equal(t, int64(days*putAmount), row.PutTotal)
assert.Equal(t, int64(days*getAmount), row.GetTotal)
assert.Equal(t, int64(days*getAuditAmount), row.GetAuditTotal)
assert.Equal(t, int64(days*getRepairAmount), row.GetRepairTotal)
assert.Equal(t, float64(days*atRestAmount), row.AtRestTotal)
assert.NotEmpty(t, row.Wallet)
if dqedNodes[row.NodeID] {
assert.NotNil(t, row.Disqualified)
} else {
assert.Nil(t, row.Disqualified)
}
}
raw, err := planet.Satellites[0].DB.StoragenodeAccounting().GetTallies(ctx)
// Confirm there is a storage tally row for each time tally ran for each storage node.
// We ran tally for one additional day, so expect 6 days of tallies.
storagenodeTallies, err := snAccountingDB.GetTallies(ctx)
require.NoError(t, err)
assert.Equal(t, days*len(planet.StorageNodes), len(raw))
assert.Equal(t, (days+1)*len(planet.StorageNodes), len(storagenodeTallies))
})
}
func TestRollupDeletes(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Rollup.DeleteTallies = true
config.Orders.Expiration = time.Hour
// 0 so that we can disqualify a node immediately by triggering a failed audit
config.Overlay.Node.AuditReputationLambda = 0
},
},
},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
planet.Satellites[0].Accounting.Rollup.Loop.Pause()
planet.Satellites[0].Accounting.Tally.Loop.Pause()
// In this test config.Rollup.DeleteTallies is set to true.
// This means old tally data will be deleted when Rollup runs.
// To confirm, this test creates 5 days of tally and rollup data, then we check
// that the correct data is in the accounting rollup table and the storagenode storage tally table.
const (
days = 5
atRestAmount = 10
getAmount = 20
putAmount = 30
getAuditAmount = 40
getRepairAmount = 50
putRepairAmount = 60
)
var (
satellitePeer = planet.Satellites[0]
ordersDB = satellitePeer.DB.Orders()
snAccountingDB = satellitePeer.DB.StoragenodeAccounting()
)
satellitePeer.Accounting.Rollup.Loop.Pause()
satellitePeer.Accounting.Tally.Loop.Pause()
// disqualifying nodes is unrelated to this test, but we do it here
// to confirm the disqualification shows up in the accounting CSVRow
dqedNodes, err := dqNodes(ctx, planet)
require.NoError(t, err)
require.NotEmpty(t, dqedNodes)
days := 5
testData := createData(planet, days)
// Set timestamp back by the number of days we want to save
timestamp := time.Now().UTC().AddDate(0, 0, -days).Truncate(time.Millisecond)
start := timestamp
firstTimestamp := start
initialTime := time.Now().UTC().AddDate(0, 0, -days)
currentTime := initialTime
for i := 0; i < days; i++ {
err := planet.Satellites[0].DB.StoragenodeAccounting().SaveTallies(ctx, timestamp, testData[i].nodeData)
require.NoError(t, err)
err = saveBW(ctx, planet, testData[i].bwTotals, timestamp)
require.NoError(t, err)
nodeData := map[storj.NodeID]float64{}
bwTotals := make(map[storj.NodeID][]int64)
for _, storageNode := range planet.StorageNodes {
nodeData[storageNode.ID()] = float64(atRestAmount)
storageNodeID := storageNode.ID()
bwTotals[storageNodeID] = []int64{putAmount, getAmount, getAuditAmount, getRepairAmount, putRepairAmount}
}
err = planet.Satellites[0].Accounting.Rollup.Rollup(ctx)
require.NoError(t, err)
// Create 5 days worth of tally and rollup data.
// Add one additional day of data since the rollup service will truncate data from the most recent day.
for i := 0; i < days+1; i++ {
require.NoError(t, snAccountingDB.SaveTallies(ctx, currentTime, nodeData))
require.NoError(t, saveBWPhase3(ctx, ordersDB, bwTotals, currentTime))
// Assert that RollupStorage deleted all tallies before Config.Orders.Expiration
raw, err := planet.Satellites[0].DB.StoragenodeAccounting().GetTallies(ctx)
require.NoError(t, err)
for _, r := range raw {
assert.WithinDuration(t, timestamp, r.IntervalEndTime, planet.Satellites[0].Accounting.Rollup.OrderExpiration)
thisDay := r.IntervalEndTime.Sub(firstTimestamp) / (24 * time.Hour)
assert.Equal(t, testData[thisDay].nodeData[r.NodeID], r.DataTotal)
}
// Since config.Rollup.DeleteTallies is set to true, at the end of Rollup(),
// storagenode storage tallies that exist before the last rollup should be deleted.
require.NoError(t, satellitePeer.Accounting.Rollup.Rollup(ctx))
// Advance time by 24 hours
timestamp = timestamp.Add(time.Hour * 24)
end := timestamp
currentTime = currentTime.Add(24 * time.Hour)
}
// rollup.RollupRaws cuts off the hr/min/sec before saving; we need to do the same when querying
start = time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, start.Location())
end = time.Date(end.Year(), end.Month(), end.Day(), 0, 0, 0, 0, end.Location())
accountingCSVRows, err := snAccountingDB.QueryPaymentInfo(ctx, initialTime.Add(-24*time.Hour), currentTime.Add(24*time.Hour))
require.NoError(t, err)
assert.Equal(t, len(planet.StorageNodes), len(accountingCSVRows))
rows, err := planet.Satellites[0].DB.StoragenodeAccounting().QueryPaymentInfo(ctx, start, end)
require.NoError(t, err)
if i == 0 { // we need at least two days for rollup to work
assert.Equal(t, 0, len(rows))
continue
}
// the number of rows should be number of nodes
assert.Equal(t, len(planet.StorageNodes), len(rows))
// verify data is correct
for _, r := range rows {
totals := expectedTotals(testData, r.NodeID, i)
assert.Equal(t, int64(totals[0]), r.PutTotal)
assert.Equal(t, int64(totals[1]), r.GetTotal)
assert.Equal(t, int64(totals[2]), r.GetAuditTotal)
assert.Equal(t, int64(totals[3]), r.GetRepairTotal)
assert.Equal(t, totals[4], r.AtRestTotal)
assert.NotEmpty(t, r.Wallet)
if dqedNodes[r.NodeID] {
assert.NotNil(t, r.Disqualified)
} else {
assert.Nil(t, r.Disqualified)
}
// Confirm that all the data saved over the 5 days is summed in the accounting rollup table.
for _, row := range accountingCSVRows {
assert.Equal(t, int64(days*putAmount), row.PutTotal)
assert.Equal(t, int64(days*getAmount), row.GetTotal)
assert.Equal(t, int64(days*getAuditAmount), row.GetAuditTotal)
assert.Equal(t, int64(days*getRepairAmount), row.GetRepairTotal)
assert.Equal(t, float64(days*atRestAmount), row.AtRestTotal)
assert.NotEmpty(t, row.Wallet)
if dqedNodes[row.NodeID] {
assert.NotNil(t, row.Disqualified)
} else {
assert.Nil(t, row.Disqualified)
}
}
// Confirm there are only storage tally rows for the last time tally ran for each storage node.
storagenodeTallies, err := snAccountingDB.GetTallies(ctx)
require.NoError(t, err)
assert.Equal(t, len(planet.StorageNodes), len(storagenodeTallies))
})
}
// expectedTotals sums test data up to, but not including, the current day's.
func expectedTotals(data []testData, id storj.NodeID, currentDay int) []float64 {
totals := make([]float64, 5)
for i := 0; i < currentDay; i++ {
totals[0] += float64(data[i].bwTotals[id][0])
totals[1] += float64(data[i].bwTotals[id][1])
totals[2] += float64(data[i].bwTotals[id][2])
totals[3] += float64(data[i].bwTotals[id][3])
totals[4] += data[i].nodeData[id]
}
return totals
func TestRollupOldOrders(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// The purpose of this test is to ensure that running Rollup properly updates storagenode accounting data
// for a period of time which has already been accounted for in a previous call to Rollup.
// This is because orders can be added to the bandwidth settlement table in the past, so a previous rollup can become inaccurate.
// Test overview:
// We have 2 nodes (A, B).
// We start at t, the initial time, which is right at the beginning of a day.
// Phase 1:
// On node A, settle bandwidth {X} at t+2hr.
// Also settle bandwidth at t+26hr. This is necessary because rollup will truncate data from the most recent day, and we don't want to
// truncate the data from the day starting at t.
// Run rollup, expect data in storagenode accounting DB to match {X} for sn A, and have nothing for sn B.
// Phase 2:
// On nodes A and B, settle bandwidth {Y} at t+1hr.
// Run rollup, expect data in storagenode accounting DB to match {X}+{Y} for sn A, and to match {Y} for sn B.
var (
satellitePeer = planet.Satellites[0]
ordersDB = satellitePeer.DB.Orders()
snAccountingDB = satellitePeer.DB.StoragenodeAccounting()
)
// Run rollup once to start so we add the correct accounting timestamps to the db
satellitePeer.Accounting.Rollup.Loop.TriggerWait()
satellitePeer.Accounting.Rollup.Loop.Pause()
satellitePeer.Accounting.Tally.Loop.Pause()
nodeA := planet.StorageNodes[0]
nodeB := planet.StorageNodes[1]
// initialTime must start at the beginning of a day so that we can be sure
// that bandwidth data for both phases of the test is settled on the _same_ day.
// Subtract 48 hours so that when rollup discards the latest day, the data we care about is not ignored.
initialTime := time.Now().Add(-48 * time.Hour).Truncate(24 * time.Hour)
const (
PutActionAmount1 = 100
GetActionAmount1 = 200
GetAuditActionAmount1 = 300
GetRepairActionAmount1 = 400
PutRepairActionAmount1 = 500
AtRestAmount1 = 600
PutActionAmount2 = 150
GetActionAmount2 = 250
GetAuditActionAmount2 = 350
GetRepairActionAmount2 = 450
PutRepairActionAmount2 = 550
AtRestAmount2 = 650
)
// Phase 1
storageTotalsPhase1 := make(map[storj.NodeID]float64)
storageTotalsPhase1[nodeA.ID()] = float64(AtRestAmount1)
require.NoError(t, snAccountingDB.SaveTallies(ctx, initialTime.Add(2*time.Hour), storageTotalsPhase1))
// save tallies for the next day too, so that the period we are testing is not truncated by the rollup service.
require.NoError(t, snAccountingDB.SaveTallies(ctx, initialTime.Add(26*time.Hour), storageTotalsPhase1))
bwTotalsPhase1 := make(map[storj.NodeID][]int64)
bwTotalsPhase1[nodeA.ID()] = []int64{PutActionAmount1, GetActionAmount1, GetAuditActionAmount1, GetRepairActionAmount1, PutRepairActionAmount1}
require.NoError(t, saveBWPhase3(ctx, ordersDB, bwTotalsPhase1, initialTime.Add(2*time.Hour)))
// save bandwidth for the next day too, so that the period we are testing is not truncated by the rollup service.
require.NoError(t, saveBWPhase3(ctx, ordersDB, bwTotalsPhase1, initialTime.Add(26*time.Hour)))
require.NoError(t, satellitePeer.Accounting.Rollup.Rollup(ctx))
accountingCSVRows, err := snAccountingDB.QueryPaymentInfo(ctx, initialTime.Add(-24*time.Hour), initialTime.Add(24*time.Hour))
require.NoError(t, err)
// there should only be data for node A
require.Len(t, accountingCSVRows, 1)
accountingCSVRow := accountingCSVRows[0]
require.Equal(t, nodeA.ID(), accountingCSVRow.NodeID)
// verify data is correct
require.EqualValues(t, PutActionAmount1, accountingCSVRow.PutTotal)
require.EqualValues(t, GetActionAmount1, accountingCSVRow.GetTotal)
require.EqualValues(t, GetAuditActionAmount1, accountingCSVRow.GetAuditTotal)
require.EqualValues(t, GetRepairActionAmount1, accountingCSVRow.GetRepairTotal)
require.EqualValues(t, PutRepairActionAmount1, accountingCSVRow.PutRepairTotal)
require.EqualValues(t, AtRestAmount1, accountingCSVRow.AtRestTotal)
// Phase 2
storageTotalsPhase2 := make(map[storj.NodeID]float64)
storageTotalsPhase2[nodeA.ID()] = float64(AtRestAmount2)
storageTotalsPhase2[nodeB.ID()] = float64(AtRestAmount2)
require.NoError(t, snAccountingDB.SaveTallies(ctx, initialTime.Add(-2*time.Hour), storageTotalsPhase2))
bwTotalsPhase2 := make(map[storj.NodeID][]int64)
bwTotalsPhase2[nodeA.ID()] = []int64{PutActionAmount2, GetActionAmount2, GetAuditActionAmount2, GetRepairActionAmount2, PutRepairActionAmount2}
bwTotalsPhase2[nodeB.ID()] = []int64{PutActionAmount2, GetActionAmount2, GetAuditActionAmount2, GetRepairActionAmount2, PutRepairActionAmount2}
require.NoError(t, saveBWPhase3(ctx, ordersDB, bwTotalsPhase2, initialTime.Add(time.Hour)))
require.NoError(t, satellitePeer.Accounting.Rollup.Rollup(ctx))
accountingCSVRows, err = snAccountingDB.QueryPaymentInfo(ctx, initialTime.Add(-24*time.Hour), initialTime.Add(24*time.Hour))
require.NoError(t, err)
// there should be data for both nodes
require.Len(t, accountingCSVRows, 2)
rA := accountingCSVRows[0]
rB := accountingCSVRows[1]
if rA.NodeID != nodeA.ID() {
rA = accountingCSVRows[1]
rB = accountingCSVRows[0]
}
require.Equal(t, nodeA.ID(), rA.NodeID)
require.Equal(t, nodeB.ID(), rB.NodeID)
// verify data is correct
require.EqualValues(t, PutActionAmount1+PutActionAmount2, rA.PutTotal)
require.EqualValues(t, GetActionAmount1+GetActionAmount2, rA.GetTotal)
require.EqualValues(t, GetAuditActionAmount1+GetAuditActionAmount2, rA.GetAuditTotal)
require.EqualValues(t, GetRepairActionAmount1+GetRepairActionAmount2, rA.GetRepairTotal)
require.EqualValues(t, PutRepairActionAmount1+PutRepairActionAmount2, rA.PutRepairTotal)
require.EqualValues(t, AtRestAmount1+AtRestAmount2, rA.AtRestTotal)
require.EqualValues(t, PutActionAmount2, rB.PutTotal)
require.EqualValues(t, GetActionAmount2, rB.GetTotal)
require.EqualValues(t, GetAuditActionAmount2, rB.GetAuditTotal)
require.EqualValues(t, GetRepairActionAmount2, rB.GetRepairTotal)
require.EqualValues(t, PutRepairActionAmount2, rB.PutRepairTotal)
require.EqualValues(t, AtRestAmount2, rB.AtRestTotal)
})
}
func createData(planet *testplanet.Planet, days int) []testData {
data := make([]testData, days)
for i := 0; i < days; i++ {
i := int64(i)
data[i].nodeData = make(map[storj.NodeID]float64)
data[i].bwTotals = make(map[storj.NodeID][]int64)
for _, n := range planet.StorageNodes {
id := n.Identity.ID
data[i].nodeData[id] = float64(i * 5000)
data[i].bwTotals[id] = []int64{i * 1000, i * 2000, i * 3000, i * 4000}
func saveBWPhase3(ctx context.Context, ordersDB orders.DB, bwTotals map[storj.NodeID][]int64, intervalStart time.Time) error {
pieceActions := []pb.PieceAction{pb.PieceAction_PUT,
pb.PieceAction_GET,
pb.PieceAction_GET_AUDIT,
pb.PieceAction_GET_REPAIR,
pb.PieceAction_PUT_REPAIR,
}
for nodeID, actions := range bwTotals {
actionAmounts := map[int32]int64{}
for actionType, amount := range actions {
actionAmounts[int32(pieceActions[actionType])] = amount
}
_, _, err := ordersDB.UpdateStoragenodeBandwidthSettleWithWindow(ctx,
nodeID,
actionAmounts,
intervalStart.Truncate(1*time.Hour),
)
if err != nil {
return err
}
}
return data
return nil
}
// dqNodes disqualifies half the nodes in the testplanet and returns a map of dqed nodes.
@@ -231,16 +380,3 @@ func dqNodes(ctx *testcontext.Context, planet *testplanet.Planet) (map[storj.Nod
}
return dqed, nil
}
func saveBW(ctx context.Context, planet *testplanet.Planet, bwTotals map[storj.NodeID][]int64, intervalStart time.Time) error {
pieceActions := []pb.PieceAction{pb.PieceAction_PUT, pb.PieceAction_GET, pb.PieceAction_GET_AUDIT, pb.PieceAction_GET_REPAIR}
for nodeID, actions := range bwTotals {
for actionType, amount := range actions {
err := planet.Satellites[0].DB.Orders().UpdateStoragenodeBandwidthSettle(ctx, nodeID, pieceActions[actionType], amount, intervalStart)
if err != nil {
return err
}
}
}
return nil
}

View File

@@ -461,7 +461,7 @@ func (endpoint *Endpoint) SettlementWithWindowMigration(stream pb.DRPCOrders_Set
var receivedCount int
var window int64
var actions = map[pb.PieceAction]struct{}{}
actions := map[pb.PieceAction]struct{}{}
var requests []*ProcessOrderRequest
var finished bool
@@ -590,9 +590,9 @@ func (endpoint *Endpoint) SettlementWithWindowFinal(stream pb.DRPCOrders_Settlem
log := endpoint.log.Named(peer.ID.String())
log.Debug("SettlementWithWindow")
var storagenodeSettled = map[int32]int64{}
var bucketSettled = map[bucketIDAction]int64{}
var seenSerials = map[storj.SerialNumber]struct{}{}
storagenodeSettled := map[int32]int64{}
bucketSettled := map[bucketIDAction]int64{}
seenSerials := map[storj.SerialNumber]struct{}{}
var window int64
var request *pb.SettlementRequest

View File

@@ -95,11 +95,9 @@ read scalar (
)
model accounting_rollup (
key id
key node_id start_time
index ( fields start_time )
field id serial64
field node_id blob
field start_time timestamp
field put_total int64
@@ -110,13 +108,7 @@ model accounting_rollup (
field at_rest_total float64
)
create accounting_rollup ( noreturn )
delete accounting_rollup ( where accounting_rollup.id = ? )
read one (
select accounting_rollup
where accounting_rollup.id = ?
)
create accounting_rollup ( noreturn, replace )
read all (
select accounting_rollup

View File

@@ -274,7 +274,6 @@ func newpgx(db *DB) *pgxDB {
func (obj *pgxDB) Schema() string {
return `CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
@@ -283,7 +282,7 @@ func (obj *pgxDB) Schema() string {
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
@@ -797,7 +796,6 @@ func newpgxcockroach(db *DB) *pgxcockroachDB {
func (obj *pgxcockroachDB) Schema() string {
return `CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
@@ -806,7 +804,7 @@ func (obj *pgxcockroachDB) Schema() string {
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
@@ -1319,7 +1317,6 @@ nextval:
}
type AccountingRollup struct {
Id int64
NodeId []byte
StartTime time.Time
PutTotal int64
@@ -1335,25 +1332,6 @@ func (AccountingRollup) _Table() string { return "accounting_rollups" }
type AccountingRollup_Update_Fields struct {
}
type AccountingRollup_Id_Field struct {
_set bool
_null bool
_value int64
}
func AccountingRollup_Id(v int64) AccountingRollup_Id_Field {
return AccountingRollup_Id_Field{_set: true, _value: v}
}
func (f AccountingRollup_Id_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (AccountingRollup_Id_Field) _Column() string { return "id" }
type AccountingRollup_NodeId_Field struct {
_set bool
_null bool
@@ -9089,7 +9067,7 @@ func (obj *pgxImpl) CreateNoReturn_AccountingTimestamps(ctx context.Context,
}
func (obj *pgxImpl) CreateNoReturn_AccountingRollup(ctx context.Context,
func (obj *pgxImpl) ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
@@ -9109,7 +9087,7 @@ func (obj *pgxImpl) CreateNoReturn_AccountingRollup(ctx context.Context,
__put_repair_total_val := accounting_rollup_put_repair_total.value()
__at_rest_total_val := accounting_rollup_at_rest_total.value()
var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")
var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) ON CONFLICT ( node_id, start_time ) DO UPDATE SET node_id = EXCLUDED.node_id, start_time = EXCLUDED.start_time, put_total = EXCLUDED.put_total, get_total = EXCLUDED.get_total, get_audit_total = EXCLUDED.get_audit_total, get_repair_total = EXCLUDED.get_repair_total, put_repair_total = EXCLUDED.put_repair_total, at_rest_total = EXCLUDED.at_rest_total")
var __values []interface{}
__values = append(__values, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
@@ -10630,34 +10608,12 @@ func (obj *pgxImpl) Find_AccountingTimestamps_Value_By_Name(ctx context.Context,
}
func (obj *pgxImpl) Get_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
accounting_rollup *AccountingRollup, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?")
var __values []interface{}
__values = append(__values, accounting_rollup_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
accounting_rollup = &AccountingRollup{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
if err != nil {
return (*AccountingRollup)(nil), obj.makeErr(err)
}
return accounting_rollup, nil
}
func (obj *pgxImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
rows []*AccountingRollup, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")
var __values []interface{}
__values = append(__values, accounting_rollup_start_time_greater_or_equal.value())
@@ -10673,7 +10629,7 @@ func (obj *pgxImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context
for __rows.Next() {
accounting_rollup := &AccountingRollup{}
err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
err = __rows.Scan(&accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -14360,33 +14316,6 @@ func (obj *pgxImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Context,
}
func (obj *pgxImpl) Delete_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
deleted bool, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?")
var __values []interface{}
__values = append(__values, accounting_rollup_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
if err != nil {
return false, obj.makeErr(err)
}
__count, err := __res.RowsAffected()
if err != nil {
return false, obj.makeErr(err)
}
return __count > 0, nil
}
func (obj *pgxImpl) Delete_Node_By_Id(ctx context.Context,
node_id Node_Id_Field) (
deleted bool, err error) {
@@ -15418,7 +15347,7 @@ func (obj *pgxcockroachImpl) CreateNoReturn_AccountingTimestamps(ctx context.Con
}
func (obj *pgxcockroachImpl) CreateNoReturn_AccountingRollup(ctx context.Context,
func (obj *pgxcockroachImpl) ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
@@ -15438,7 +15367,7 @@ func (obj *pgxcockroachImpl) CreateNoReturn_AccountingRollup(ctx context.Context
__put_repair_total_val := accounting_rollup_put_repair_total.value()
__at_rest_total_val := accounting_rollup_at_rest_total.value()
var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")
var __embed_stmt = __sqlbundle_Literal("UPSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")
var __values []interface{}
__values = append(__values, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
@@ -16959,34 +16888,12 @@ func (obj *pgxcockroachImpl) Find_AccountingTimestamps_Value_By_Name(ctx context
}
func (obj *pgxcockroachImpl) Get_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
accounting_rollup *AccountingRollup, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.id = ?")
var __values []interface{}
__values = append(__values, accounting_rollup_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
accounting_rollup = &AccountingRollup{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
if err != nil {
return (*AccountingRollup)(nil), obj.makeErr(err)
}
return accounting_rollup, nil
}
func (obj *pgxcockroachImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ctx context.Context,
accounting_rollup_start_time_greater_or_equal AccountingRollup_StartTime_Field) (
rows []*AccountingRollup, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.id, accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")
var __embed_stmt = __sqlbundle_Literal("SELECT accounting_rollups.node_id, accounting_rollups.start_time, accounting_rollups.put_total, accounting_rollups.get_total, accounting_rollups.get_audit_total, accounting_rollups.get_repair_total, accounting_rollups.put_repair_total, accounting_rollups.at_rest_total FROM accounting_rollups WHERE accounting_rollups.start_time >= ?")
var __values []interface{}
__values = append(__values, accounting_rollup_start_time_greater_or_equal.value())
@@ -17002,7 +16909,7 @@ func (obj *pgxcockroachImpl) All_AccountingRollup_By_StartTime_GreaterOrEqual(ct
for __rows.Next() {
accounting_rollup := &AccountingRollup{}
err = __rows.Scan(&accounting_rollup.Id, &accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
err = __rows.Scan(&accounting_rollup.NodeId, &accounting_rollup.StartTime, &accounting_rollup.PutTotal, &accounting_rollup.GetTotal, &accounting_rollup.GetAuditTotal, &accounting_rollup.GetRepairTotal, &accounting_rollup.PutRepairTotal, &accounting_rollup.AtRestTotal)
if err != nil {
return nil, obj.makeErr(err)
}
@@ -20689,33 +20596,6 @@ func (obj *pgxcockroachImpl) Delete_Irreparabledb_By_Segmentpath(ctx context.Con
}
func (obj *pgxcockroachImpl) Delete_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
deleted bool, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("DELETE FROM accounting_rollups WHERE accounting_rollups.id = ?")
var __values []interface{}
__values = append(__values, accounting_rollup_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__res, err := obj.driver.ExecContext(ctx, __stmt, __values...)
if err != nil {
return false, obj.makeErr(err)
}
__count, err := __res.RowsAffected()
if err != nil {
return false, obj.makeErr(err)
}
return __count > 0, nil
}
func (obj *pgxcockroachImpl) Delete_Node_By_Id(ctx context.Context,
node_id Node_Id_Field) (
deleted bool, err error) {
@@ -21932,24 +21812,6 @@ func (rx *Rx) Count_UserCredit_By_ReferredBy(ctx context.Context,
return tx.Count_UserCredit_By_ReferredBy(ctx, user_credit_referred_by)
}
func (rx *Rx) CreateNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.CreateNoReturn_AccountingRollup(ctx, accounting_rollup_node_id, accounting_rollup_start_time, accounting_rollup_put_total, accounting_rollup_get_total, accounting_rollup_get_audit_total, accounting_rollup_get_repair_total, accounting_rollup_put_repair_total, accounting_rollup_at_rest_total)
}
func (rx *Rx) CreateNoReturn_AccountingTimestamps(ctx context.Context,
accounting_timestamps_name AccountingTimestamps_Name_Field,
accounting_timestamps_value AccountingTimestamps_Value_Field) (
@@ -22477,16 +22339,6 @@ func (rx *Rx) Create_ValueAttribution(ctx context.Context,
}
func (rx *Rx) Delete_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
deleted bool, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Delete_AccountingRollup_By_Id(ctx, accounting_rollup_id)
}
func (rx *Rx) Delete_ApiKey_By_Id(ctx context.Context,
api_key_id ApiKey_Id_Field) (
deleted bool, err error) {
@@ -22751,16 +22603,6 @@ func (rx *Rx) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(c
return tx.First_BucketStorageTally_By_ProjectId_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id)
}
func (rx *Rx) Get_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
accounting_rollup *AccountingRollup, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Get_AccountingRollup_By_Id(ctx, accounting_rollup_id)
}
func (rx *Rx) Get_ApiKey_By_Head(ctx context.Context,
api_key_head ApiKey_Head_Field) (
api_key *ApiKey, err error) {
@@ -23258,6 +23100,24 @@ func (rx *Rx) Paged_PendingSerialQueue(ctx context.Context,
return tx.Paged_PendingSerialQueue(ctx, limit, start)
}
func (rx *Rx) ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.ReplaceNoReturn_AccountingRollup(ctx, accounting_rollup_node_id, accounting_rollup_start_time, accounting_rollup_put_total, accounting_rollup_get_total, accounting_rollup_get_audit_total, accounting_rollup_get_repair_total, accounting_rollup_put_repair_total, accounting_rollup_at_rest_total)
}
func (rx *Rx) ReplaceNoReturn_NodeApiVersion(ctx context.Context,
node_api_version_id NodeApiVersion_Id_Field,
node_api_version_api_version NodeApiVersion_ApiVersion_Field) (
@@ -23629,17 +23489,6 @@ type Methods interface {
user_credit_referred_by UserCredit_ReferredBy_Field) (
count int64, err error)
CreateNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error)
CreateNoReturn_AccountingTimestamps(ctx context.Context,
accounting_timestamps_name AccountingTimestamps_Name_Field,
accounting_timestamps_value AccountingTimestamps_Value_Field) (
@@ -23922,10 +23771,6 @@ type Methods interface {
value_attribution_partner_id ValueAttribution_PartnerId_Field) (
value_attribution *ValueAttribution, err error)
Delete_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
deleted bool, err error)
Delete_ApiKey_By_Id(ctx context.Context,
api_key_id ApiKey_Id_Field) (
deleted bool, err error)
@@ -24035,10 +23880,6 @@ type Methods interface {
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field) (
bucket_storage_tally *BucketStorageTally, err error)
Get_AccountingRollup_By_Id(ctx context.Context,
accounting_rollup_id AccountingRollup_Id_Field) (
accounting_rollup *AccountingRollup, err error)
Get_ApiKey_By_Head(ctx context.Context,
api_key_head ApiKey_Head_Field) (
api_key *ApiKey, err error)
@@ -24254,6 +24095,17 @@ type Methods interface {
limit int, start *Paged_PendingSerialQueue_Continuation) (
rows []*PendingSerialQueue, next *Paged_PendingSerialQueue_Continuation, err error)
ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error)
ReplaceNoReturn_NodeApiVersion(ctx context.Context,
node_api_version_id NodeApiVersion_Id_Field,
node_api_version_api_version NodeApiVersion_ApiVersion_Field) (

View File

@@ -1,7 +1,6 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
@@ -10,7 +9,7 @@ CREATE TABLE accounting_rollups (
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,

View File

@@ -1,7 +1,6 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
@@ -10,7 +9,7 @@ CREATE TABLE accounting_rollups (
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,

View File

@@ -9,8 +9,10 @@ import (
"strings"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
@@ -1030,6 +1032,97 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
`CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );`,
},
},
{
DB: &db.migrationDB,
Description: "Use node_id and start_time for accounting_rollups pkey instead of autogenerated id",
Version: 133,
SeparateTx: true,
Action: migrate.Func(func(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error {
if _, ok := db.Driver().(*cockroachutil.Driver); ok {
_, err := db.Exec(ctx,
`ALTER TABLE accounting_rollups RENAME TO accounting_rollups_original;`,
)
if err != nil {
return ErrMigrate.Wrap(err)
}
_, err = db.Exec(ctx,
`CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
INSERT INTO accounting_rollups (
node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total
)
SELECT node_id,
start_time,
SUM(put_total)::bigint,
SUM(get_total)::bigint,
SUM(get_audit_total)::bigint,
SUM(get_repair_total)::bigint,
SUM(put_repair_total)::bigint,
SUM(at_rest_total)
FROM accounting_rollups_original
GROUP BY node_id, start_time;
DROP TABLE accounting_rollups_original;`,
)
if err != nil {
return ErrMigrate.Wrap(err)
}
return nil
}
_, err := db.Exec(ctx,
`CREATE TABLE accounting_rollups_new (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
DROP INDEX accounting_rollups_start_time_index;
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups_new ( start_time );
INSERT INTO accounting_rollups_new (
node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total
)
SELECT node_id,
start_time,
SUM(put_total),
SUM(get_total),
SUM(get_audit_total),
SUM(get_repair_total),
SUM(put_repair_total),
SUM(at_rest_total)
FROM accounting_rollups
GROUP BY node_id, start_time;
DROP TABLE accounting_rollups;
ALTER INDEX accounting_rollups_new_pkey RENAME TO accounting_rollups_pkey;
ALTER TABLE accounting_rollups_new RENAME TO accounting_rollups;`,
)
if err != nil {
return ErrMigrate.Wrap(err)
}
return nil
}),
},
},
}
}

View File

@@ -840,7 +840,7 @@ func (db *ordersDB) UpdateStoragenodeBandwidthSettleWithWindow(ctx context.Conte
// SettledAmountsMatch checks if database rows match the orders. If the settled amounts
// for each action are not the same, false is returned.
func SettledAmountsMatch(rows []*dbx.StoragenodeBandwidthRollup, orderActionAmounts map[int32]int64) bool {
var rowsSumByAction = map[int32]int64{}
rowsSumByAction := map[int32]int64{}
for _, row := range rows {
rowsSumByAction[int32(row.Action)] += int64(row.Settled)
}

View File

@@ -164,7 +164,7 @@ func (db *StoragenodeAccounting) SaveRollup(ctx context.Context, latestRollup ti
putRepair := dbx.AccountingRollup_PutRepairTotal(ar.PutRepairTotal)
atRest := dbx.AccountingRollup_AtRestTotal(ar.AtRestTotal)
err := tx.CreateNoReturn_AccountingRollup(ctx, nID, start, put, get, audit, getRepair, putRepair, atRest)
err := tx.ReplaceNoReturn_AccountingRollup(ctx, nID, start, put, get, audit, getRepair, putRepair, atRest)
if err != nil {
return err
}
@@ -202,7 +202,7 @@ func (db *StoragenodeAccounting) LastTimestamp(ctx context.Context, timestampTyp
// QueryPaymentInfo queries Overlay, Accounting Rollup on nodeID.
func (db *StoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) (_ []*accounting.CSVRow, err error) {
defer mon.Task()(&ctx)(&err)
var sqlStmt = `SELECT n.id, n.created_at, r.at_rest_total, r.get_repair_total,
sqlStmt := `SELECT n.id, n.created_at, r.at_rest_total, r.get_repair_total,
r.put_repair_total, r.get_audit_total, r.put_total, r.get_total, n.wallet, n.disqualified
FROM (
SELECT node_id, SUM(at_rest_total::decimal) AS at_rest_total, SUM(get_repair_total) AS get_repair_total,
@@ -366,7 +366,7 @@ func (db *StoragenodeAccounting) QueryStorageNodeUsage(ctx context.Context, node
// DeleteTalliesBefore deletes all raw tallies prior to some time.
func (db *StoragenodeAccounting) DeleteTalliesBefore(ctx context.Context, latestRollup time.Time) (err error) {
defer mon.Task()(&ctx)(&err)
var deleteRawSQL = `DELETE FROM storagenode_storage_tallies WHERE interval_end_time < ?`
deleteRawSQL := `DELETE FROM storagenode_storage_tallies WHERE interval_end_time < ?`
_, err = db.db.DB.ExecContext(ctx, db.db.Rebind(deleteRawSQL), latestRollup)
return err
}

View File

@@ -0,0 +1,582 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
node_id bytea NOT NULL,
history bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
storage_node_id bytea NOT NULL,
serial_number bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL DEFAULT 0,
pieces_failed bigint NOT NULL DEFAULT 0,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL DEFAULT 0,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
num_healthy_pieces integer NOT NULL DEFAULT 52,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
api_version integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL DEFAULT 0,
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
serial_number bytea NOT NULL,
action integer NOT NULL,
settled bigint NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint,
bandwidth_limit bigint,
rate_limit integer,
max_buckets integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
project_id bytea NOT NULL,
interval_month date NOT NULL,
egress_allocated bigint NOT NULL,
PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
expires_at timestamp with time zone NOT NULL,
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
action integer NOT NULL,
serial_number bytea NOT NULL,
settled bigint NOT NULL,
observed_at timestamp with time zone NOT NULL,
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE revocations (
revoked bytea NOT NULL,
api_key_id bytea NOT NULL,
PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
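-- NOTE: storagenode_bandwidth_rollups_phase2 mirrors storagenode_bandwidth_rollups; judging by its
-- name, it appears to hold rollups settled via the older (pre-phase-3) order submission path while
-- the transition to phase 3 is in progress.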
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text NOT NULL,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( id, offer_id )
);
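-- Indexes. accounting_rollups_start_time_index (below) supports the rollup service's range scans
-- of accounting_rollups by start_time.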
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
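-- Seed data for this schema snapshot. Note that the accounting_rollups row below is inserted
-- without an explicit id, unlike the pre-migration rows in the OLD DATA section at the end of
-- the file.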
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 52, 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 30, 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 51, 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 40, 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 40, 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
-- OLD DATA --
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (99, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (100, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);