Replace base64 encoding for path segments (#2345)
parent 0f36b160fc
commit 268c629ba8
@@ -205,7 +205,7 @@ func (checker *Checker) updateSegmentStatus(ctx context.Context, pointer *pb.Poi
 	}
 	monStats.remoteSegmentsNeedingRepair++
 	err = checker.repairQueue.Insert(ctx, &pb.InjuredSegment{
-		Path:         path,
+		Path:         []byte(path),
 		LostPieces:   missingPieces,
 		InsertedTime: time.Now().UTC(),
 	})
@@ -4,6 +4,7 @@
 package checker_test

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"testing"
@@ -49,7 +50,7 @@ func TestIdentifyInjuredSegments(t *testing.T) {
 	require.NoError(t, err)

 	numValidNode := int32(len(planet.StorageNodes))
-	require.Equal(t, "b", injuredSegment.Path)
+	require.Equal(t, []byte("b"), injuredSegment.Path)
 	require.Equal(t, len(planet.StorageNodes), len(injuredSegment.LostPieces))
 	for _, lostPiece := range injuredSegment.LostPieces {
 		// makePointer() starts with numValidNode good pieces
@@ -233,7 +234,7 @@ func TestCheckerResume(t *testing.T) {
 	// "a" should be the only segment in the repair queue
 	injuredSegment, err := repairQueue.Select(ctx)
 	require.NoError(t, err)
-	require.Equal(t, injuredSegment.Path, "a")
+	require.Equal(t, injuredSegment.Path, []byte("a"))
 	err = repairQueue.Delete(ctx, injuredSegment)
 	require.NoError(t, err)
 	injuredSegment, err = repairQueue.Select(ctx)
@@ -245,7 +246,7 @@ func TestCheckerResume(t *testing.T) {
 	// "c" should be the only segment in the repair queue
 	injuredSegment, err = repairQueue.Select(ctx)
 	require.NoError(t, err)
-	require.Equal(t, injuredSegment.Path, "c")
+	require.Equal(t, injuredSegment.Path, []byte("c"))
 	err = repairQueue.Delete(ctx, injuredSegment)
 	require.NoError(t, err)
 	injuredSegment, err = repairQueue.Select(ctx)
@@ -257,7 +258,7 @@ func TestCheckerResume(t *testing.T) {
 	// "a" should be the only segment in the repair queue
 	injuredSegment, err = repairQueue.Select(ctx)
 	require.NoError(t, err)
-	require.Equal(t, injuredSegment.Path, "a")
+	require.Equal(t, injuredSegment.Path, []byte("a"))
 	err = repairQueue.Delete(ctx, injuredSegment)
 	require.NoError(t, err)
 	injuredSegment, err = repairQueue.Select(ctx)
@@ -271,7 +272,7 @@ type mockRepairQueue struct {
 }

 func (mockRepairQueue *mockRepairQueue) Insert(ctx context.Context, s *pb.InjuredSegment) error {
-	if s.Path == "b" || s.Path == "d" {
+	if bytes.Equal(s.Path, []byte("b")) || bytes.Equal(s.Path, []byte("d")) {
 		return errs.New("mock Insert error")
 	}
 	mockRepairQueue.injuredSegments = append(mockRepairQueue.injuredSegments, *s)
@@ -290,7 +291,7 @@ func (mockRepairQueue *mockRepairQueue) Delete(ctx context.Context, s *pb.Injure
 	var toDelete int
 	found := false
 	for i, seg := range mockRepairQueue.injuredSegments {
-		if seg.Path == s.Path {
+		if bytes.Equal(seg.Path, s.Path) {
 			toDelete = i
 			found = true
 			break
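Side note on the `bytes.Equal` switch above: Go slices are only comparable to `nil`, so once `Path` becomes `[]byte` the old `==` comparisons would no longer compile. A minimal standalone sketch, outside this codebase:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a, b := []byte("b"), []byte("b")
	// a == b does not compile for slices; bytes.Equal compares contents.
	fmt.Println(bytes.Equal(a, b))           // true
	fmt.Println(bytes.Equal(a, []byte("d"))) // false
}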
@@ -26,7 +26,7 @@ func TestInsertSelect(t *testing.T) {
 		q := db.RepairQueue()

 		seg := &pb.InjuredSegment{
-			Path:       "abc",
+			Path:       []byte("abc"),
 			LostPieces: []int32{int32(1), int32(3)},
 		}
 		err := q.Insert(ctx, seg)
@@ -47,7 +47,7 @@ func TestInsertDuplicate(t *testing.T) {
 		q := db.RepairQueue()

 		seg := &pb.InjuredSegment{
-			Path:       "abc",
+			Path:       []byte("abc"),
 			LostPieces: []int32{int32(1), int32(3)},
 		}
 		err := q.Insert(ctx, seg)
@@ -81,7 +81,7 @@ func TestSequential(t *testing.T) {
 		var addSegs []*pb.InjuredSegment
 		for i := 0; i < N; i++ {
 			seg := &pb.InjuredSegment{
-				Path:       strconv.Itoa(i),
+				Path:       []byte(strconv.Itoa(i)),
 				LostPieces: []int32{int32(i)},
 			}
 			err := q.Insert(ctx, seg)
@@ -123,7 +123,7 @@ func TestParallel(t *testing.T) {
 			i := i
 			inserts.Go(func() error {
 				return q.Insert(ctx, &pb.InjuredSegment{
-					Path:       strconv.Itoa(i),
+					Path:       []byte(strconv.Itoa(i)),
 					LostPieces: []int32{int32(i)},
 				})
 			})
@@ -121,7 +121,7 @@ func (service *Service) process(ctx context.Context) (err error) {
 	defer mon.Task()(&ctx)(&err)
 	for {
 		seg, err := service.queue.Select(ctx)
-		zap.L().Info("Dequeued segment from repair queue", zap.String("segment", seg.GetPath()))
+		zap.L().Info("Dequeued segment from repair queue", zap.Binary("segment", seg.GetPath()))
 		if err != nil {
 			if storage.ErrEmptyQueue.Has(err) {
 				return nil
@@ -143,12 +143,12 @@ func (service *Service) worker(ctx context.Context, seg *pb.InjuredSegment) (err

 	workerStartTime := time.Now().UTC()

-	zap.L().Info("Limiter running repair on segment", zap.String("segment", seg.GetPath()))
-	err = service.repairer.Repair(ctx, seg.GetPath())
+	zap.L().Info("Limiter running repair on segment", zap.Binary("segment", seg.GetPath()))
+	err = service.repairer.Repair(ctx, string(seg.GetPath()))
 	if err != nil {
 		return Error.New("repair failed: %v", err)
 	}
-	zap.L().Info("Deleting segment from repair queue", zap.String("segment", seg.GetPath()))
+	zap.L().Info("Deleting segment from repair queue", zap.Binary("segment", seg.GetPath()))
 	err = service.queue.Delete(ctx, seg)
 	if err != nil {
 		return Error.New("repair delete failed: %v", err)
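With `GetPath()` now returning `[]byte`, the log fields change from `zap.String` to `zap.Binary`; note that zap's JSON encoder renders a `Binary` field base64-encoded rather than as raw text. A small sketch using only the stock zap API, nothing project-specific:

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction() // JSON encoder
	defer func() { _ = logger.Sync() }()

	path := []byte("so/many/iconic/paths")
	// zap.Binary accepts []byte directly; zap.String would force a conversion.
	logger.Info("Dequeued segment from repair queue", zap.Binary("segment", path))
}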
@@ -31,7 +31,7 @@ func ExampleEncryptPath() {
 		panic(err)
 	}
 	fmt.Println("path to encrypt:", path)
-	fmt.Println("encrypted path: ", encryptedPath)
+	fmt.Println("encrypted path: ", hex.EncodeToString([]byte(encryptedPath.Raw())))

 	// decrypting the path
 	decryptedPath, err := encryption.DecryptPath("bucket", encryptedPath, storj.EncAESGCM, store)
@@ -43,6 +43,6 @@ func ExampleEncryptPath() {
 	// Output:
 	// root key (32 bytes): 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
 	// path to encrypt: fold1/fold2/fold3/file.txt
-	// encrypted path: OHzjTiBUvLmgQouCAYdu74MlqDl791aOka_EBzlAb_rR/RT0pG5y4lHFVRi1sHtwjZ1B7DeVbRvpyMfO6atfOefSC/rXJX6O9Pk4rGtnlLUIUoc9Gz0y6N-xemdNyAasbo3dQm/qiEo3IYUlA989mKFE7WB98GHJK88AI98hhUgwv39ePexslzg
+	// encrypted path: 02387ce34e2054bcb9a0428b820102876eef8325a8397bf7568e91afc40739406ffad12f02453d291b9cb8947155462d6c1edc2367507b0de55b46fa7231f3ba6ad7ce79f4822f02ad7257e8ef4f938ac6b6794b50852873d1b3d32e018dfb17a674dc806ac6e8ddd4262f02aa2128dc8614940f7cf6628513b581f7c18724af3c01018f7c861520c2fdfd78f7b1b25ce0
 	// decrypted path: fold1/fold2/fold3/file.txt
 }
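Since encrypted path components are no longer base64 text, printing them directly would put raw non-printable bytes into the example's `// Output:` block; hex-encoding keeps the output printable and deterministic. A trivial sketch of the same display trick:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	raw := []byte{0x02, 0x38, 0x7c} // stand-in for binary path bytes
	fmt.Println(hex.EncodeToString(raw)) // "02387c"
}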
@@ -6,7 +6,6 @@ package encryption
 import (
 	"crypto/hmac"
 	"crypto/sha512"
-	"encoding/base64"
 	"strings"

 	"github.com/zeebo/errs"
@@ -15,6 +14,16 @@ import (
 	"storj.io/storj/pkg/storj"
 )

+var (
+	emptyComponentPrefix    = byte('\x01')
+	notEmptyComponentPrefix = byte('\x02')
+	emptyComponent          = []byte{emptyComponentPrefix}
+
+	escapeSlash = byte('\x2e')
+	escapeFF    = byte('\xfe')
+	escape01    = byte('\x01')
+)
+
 // EncryptPath encrypts the path using the provided cipher and looking up
 // keys from the provided store and bucket.
 func EncryptPath(bucket string, path paths.Unencrypted, cipher storj.CipherSuite, store *Store) (
@@ -261,7 +270,7 @@ func encryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key)
 	}

 	// keep the nonce together with the cipher text
-	return base64.RawURLEncoding.EncodeToString(append(nonce[:nonceSize], cipherText...)), nil
+	return string(encodeSegment(append(nonce[:nonceSize], cipherText...))), nil
 }

 // decryptPathComponent decrypts a single path component with the provided cipher and key.
@@ -270,7 +279,7 @@ func decryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key)
 		return "", nil
 	}

-	data, err := base64.RawURLEncoding.DecodeString(comp)
+	data, err := decodeSegment([]byte(comp))
 	if err != nil {
 		return "", Error.Wrap(err)
 	}
@@ -294,3 +303,125 @@ func decryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key)

 	return string(decrypted), nil
 }
+
+// encodeSegment encodes segment according to specific rules
+// The empty path component is encoded as `\x01`
+// Any other path component is encoded as `\x02 + escape(component)`
+//
+// `\x2e` escapes to `\x2e\x01`
+// `\x2f` escapes to `\x2e\x02`
+// `\xfe` escapes to `\xfe\x01`
+// `\xff` escapes to `\xfe\x02`
+// `\x00` escapes to `\x01\x01`
+// `\x01` escapes to `\x01\x02`
+// for more details see docs/design/path-component-encoding.md
+func encodeSegment(segment []byte) []byte {
+	if len(segment) == 0 {
+		return emptyComponent
+	}
+
+	result := make([]byte, 0, len(segment)*2+1)
+	result = append(result, notEmptyComponentPrefix)
+	for i := 0; i < len(segment); i++ {
+		switch segment[i] {
+		case escapeSlash:
+			result = append(result, []byte{escapeSlash, 1}...)
+		case escapeSlash + 1:
+			result = append(result, []byte{escapeSlash, 2}...)
+		case escapeFF:
+			result = append(result, []byte{escapeFF, 1}...)
+		case escapeFF + 1:
+			result = append(result, []byte{escapeFF, 2}...)
+		case escape01 - 1:
+			result = append(result, []byte{escape01, 1}...)
+		case escape01:
+			result = append(result, []byte{escape01, 2}...)
+		default:
+			result = append(result, segment[i])
+		}
+	}
+	return result
+}
+
+func decodeSegment(segment []byte) ([]byte, error) {
+	err := validateEncodedSegment(segment)
+	if err != nil {
+		return []byte{}, err
+	}
+	if segment[0] == emptyComponentPrefix {
+		return []byte{}, nil
+	}
+
+	currentIndex := 0
+	for i := 1; i < len(segment); i++ {
+		switch {
+		case i == len(segment)-1:
+			segment[currentIndex] = segment[i]
+		case segment[i] == escapeSlash || segment[i] == escapeFF:
+			segment[currentIndex] = segment[i] + segment[i+1] - 1
+			i++
+		case segment[i] == escape01:
+			segment[currentIndex] = segment[i+1] - 1
+			i++
+		default:
+			segment[currentIndex] = segment[i]
+		}
+		currentIndex++
+	}
+	return segment[:currentIndex], nil
+}
+
+// validateEncodedSegment checks if:
+// * The last byte/sequence is not in {escapeSlash, escapeFF, escape01}
+// * Any byte after an escape character is \x01 or \x02
+// * It does not contain any characters in {\x00, \xff, \x2f}
+// * It is non-empty
+// * It begins with a character in {\x01, \x02}
+func validateEncodedSegment(segment []byte) error {
+	switch {
+	case len(segment) == 0:
+		return errs.New("encoded segment cannot be empty")
+	case segment[0] != emptyComponentPrefix && segment[0] != notEmptyComponentPrefix:
+		return errs.New("invalid segment prefix")
+	case segment[0] == emptyComponentPrefix && len(segment) > 1:
+		return errs.New("segment encoded as empty but contains data")
+	case segment[0] == notEmptyComponentPrefix && len(segment) == 1:
+		return errs.New("segment encoded as not empty but doesn't contain data")
+	}
+
+	if len(segment) == 1 {
+		return nil
+	}
+
+	index := 1
+	for ; index < len(segment)-1; index++ {
+		if isEscapeByte(segment[index]) {
+			if segment[index+1] == 1 || segment[index+1] == 2 {
+				index++
+				continue
+			}
+			return errs.New("invalid escape sequence")
+		}
+		if isDisallowedByte(segment[index]) {
+			return errs.New("invalid character in segment")
+		}
+	}
+	if index == len(segment)-1 {
+		if isEscapeByte(segment[index]) {
+			return errs.New("invalid escape sequence")
+		}
+		if isDisallowedByte(segment[index]) {
+			return errs.New("invalid character")
+		}
+	}
+
+	return nil
+}
+
+func isEscapeByte(b byte) bool {
+	return b == escapeSlash || b == escapeFF || b == escape01
+}
+
+func isDisallowedByte(b byte) bool {
+	return b == 0 || b == '\xff' || b == '/'
+}
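For a concrete feel of the scheme above: a non-empty component gets the `\x02` prefix, and only six byte values (`\x2e`, `\x2f`, `\xfe`, `\xff`, `\x00`, `\x01`) are escaped, so most bytes pass through unchanged while the encoded form stays free of `\x00`, `\xff`, and `/`. A standalone sketch with the escape table condensed from the diff (a hypothetical `main` package, not part of the change):

package main

import "fmt"

// escapeByte mirrors the switch in encodeSegment from the diff above.
func escapeByte(b byte) []byte {
	switch b {
	case 0x2e:
		return []byte{0x2e, 1} // '.'  -> \x2e\x01
	case 0x2f:
		return []byte{0x2e, 2} // '/'  -> \x2e\x02
	case 0xfe:
		return []byte{0xfe, 1}
	case 0xff:
		return []byte{0xfe, 2}
	case 0x00:
		return []byte{0x01, 1}
	case 0x01:
		return []byte{0x01, 2}
	default:
		return []byte{b}
	}
}

func main() {
	segment := []byte("a/b")
	encoded := []byte{0x02} // non-empty component prefix
	for _, b := range segment {
		encoded = append(encoded, escapeByte(b)...)
	}
	fmt.Printf("%x\n", encoded) // 02612e0262: 'a', escaped '/', 'b'
}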
@@ -4,10 +4,13 @@
 package encryption

 import (
+	"bytes"
+	"encoding/base64"
 	"fmt"
 	"testing"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

+	"storj.io/storj/internal/testrand"
 	"storj.io/storj/pkg/paths"
@@ -63,3 +66,133 @@ func forAllCiphers(test func(cipher storj.CipherSuite)) {
 		test(cipher)
 	}
 }
+
+func TestSegmentEncoding(t *testing.T) {
+	segments := [][]byte{
+		{},
+		{'a'},
+		{0},
+		{'/'},
+		{'a', 'b', 'c', 'd', '1', '2', '3', '4', '5'},
+		{'/', '/', '/', '/', '/'},
+		{0, 0, 0, 0, 0, 0, 0, 0, 0},
+		{'a', '/', 'a', '2', 'a', 'a', 0, '1', 'b', 255},
+		{'/', '/', 'a', 0, 'a', 'a', 0, '1', 'b', 'g', 'a', 'b', '/'},
+		{0, '/', 'a', '0', 'a', 'a', 0, '1', 'b', 'g', 'a', 'b', 0},
+	}
+
+	// additional random segments
+	for i := 0; i < 20; i++ {
+		segments = append(segments, testrand.BytesInt(testrand.Intn(256)))
+	}
+
+	for i, segment := range segments {
+		encoded := encodeSegment(segment)
+		require.Equal(t, -1, bytes.IndexByte(encoded, 0))
+		require.Equal(t, -1, bytes.IndexByte(encoded, 255))
+		require.Equal(t, -1, bytes.IndexByte(encoded, '/'))
+
+		decoded, err := decodeSegment(encoded)
+		require.NoError(t, err, "#%d", i)
+		require.Equal(t, segment, decoded, "#%d", i)
+	}
+}
+
+func TestInvalidSegmentDecoding(t *testing.T) {
+	encoded := []byte{3, 4, 5, 6, 7}
+	// first byte should be '\x01' or '\x02'
+	_, err := decodeSegment(encoded)
+	require.Error(t, err)
+}
+
+func TestValidateEncodedSegment(t *testing.T) {
+	// all segments should be invalid
+	encodedSegments := [][]byte{
+		{},
+		{1, 1},
+		{2},
+		{2, 0},
+		{2, '\xff'},
+		{2, '\x2f'},
+		{2, escapeSlash, '3'},
+		{2, escapeFF, '3'},
+		{2, escape01, '3'},
+		{3, 4, 4, 4},
+	}
+
+	for i, segment := range encodedSegments {
+		_, err := decodeSegment(segment)
+		require.Error(t, err, "#%d", i)
+	}
+}
+
+func TestEncodingDecodingStress(t *testing.T) {
+	allCombinations := func(emit func([]byte)) {
+		length := 3
+		s := make([]byte, length)
+		last := length - 1
+		var combination func(int, int)
+		combination = func(i int, next int) {
+			for j := next; j < 256; j++ {
+				s[i] = byte(j)
+				if i == last {
+					emit(s)
+				} else {
+					combination(i+1, j+1)
+				}
+			}
+			return
+		}
+		combination(0, 0)
+	}
+
+	// all combinations for length 3
+	allCombinations(func(segment []byte) {
+		_ = encodeSegment(segment)
+		_, _ = decodeSegment(segment)
+	})
+
+	// random segments
+	for i := 0; i < 20; i++ {
+		segment := testrand.BytesInt(testrand.Intn(256))
+		_ = encodeSegment(segment)
+		_, _ = decodeSegment(segment)
+	}
+}
+
+func BenchmarkSegmentEncoding(b *testing.B) {
+	segments := [][]byte{
+		{},
+		{'a'},
+		{0},
+		{'/'},
+		{'a', 'b', 'c', 'd', '1', '2', '3', '4', '5'},
+
+		{'/', '/', '/', '/', '/'},
+		{0, 0, 0, 0, 0, 0, 0, 0, 0},
+
+		{'a', '/', 'a', '2', 'a', 'a', 0, '1', 'b', 255},
+		{'/', '/', 'a', 0, 'a', 'a', 0, '1', 'b', 'g', 'a', 'b', '/'},
+		{0, '/', 'a', '0', 'a', 'a', 0, '1', 'b', 'g', 'a', 'b', 0},
+	}
+
+	// additional random segment
+	segments = append(segments, testrand.BytesInt(255))
+
+	b.Run("Loop", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			for _, segment := range segments {
+				encoded := encodeSegment(segment)
+				_, _ = decodeSegment(encoded)
+			}
+		}
+	})
+	b.Run("Base64", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			for _, segment := range segments {
+				encoded := base64.RawURLEncoding.EncodeToString(segment)
+				_, _ = base64.RawURLEncoding.DecodeString(encoded)
+			}
+		}
+	})
+}
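A property worth spelling out: the escape scheme keeps encoded components in the same lexicographic order as the raw bytes (each byte maps to a one- or two-byte sequence that compares like the original), which base64 cannot do because its alphabet is not in byte order: '-' encodes value 62 yet sorts before 'A', which encodes 0. A quick demonstration of the ordering break, my example rather than one from these tests:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

func main() {
	lo, hi := []byte{0x00}, []byte{0xf8}
	elo := base64.RawURLEncoding.EncodeToString(lo) // "AA"
	ehi := base64.RawURLEncoding.EncodeToString(hi) // "-A"

	fmt.Println(bytes.Compare(lo, hi) < 0) // true: 0x00 < 0xf8
	fmt.Println(elo < ehi)                 // false: "AA" > "-A", order flipped
}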
@@ -26,7 +26,7 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

 // InjuredSegment is the queue item used for the data repair queue
 type InjuredSegment struct {
-	Path                 string    `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+	Path                 []byte    `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
 	LostPieces           []int32   `protobuf:"varint,2,rep,packed,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"`
 	InsertedTime         time.Time `protobuf:"bytes,3,opt,name=inserted_time,json=insertedTime,proto3,stdtime" json:"inserted_time"`
 	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
@@ -58,11 +58,11 @@ func (m *InjuredSegment) XXX_DiscardUnknown() {

 var xxx_messageInfo_InjuredSegment proto.InternalMessageInfo

-func (m *InjuredSegment) GetPath() string {
+func (m *InjuredSegment) GetPath() []byte {
 	if m != nil {
 		return m.Path
 	}
-	return ""
+	return nil
 }

 func (m *InjuredSegment) GetLostPieces() []int32 {
@@ -93,11 +93,11 @@ var fileDescriptor_b1b08e6fe9398aa6 = []byte{
 	0xfa, 0x60, 0x5e, 0x52, 0x69, 0x9a, 0x7e, 0x49, 0x66, 0x6e, 0x6a, 0x71, 0x49, 0x62, 0x6e, 0x01,
 	0x44, 0x81, 0xd2, 0x04, 0x46, 0x2e, 0x3e, 0xcf, 0xbc, 0xac, 0xd2, 0xa2, 0xd4, 0x94, 0xe0, 0xd4,
 	0xf4, 0xdc, 0xd4, 0xbc, 0x12, 0x21, 0x21, 0x2e, 0x96, 0x82, 0xc4, 0x92, 0x0c, 0x09, 0x46, 0x05,
-	0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48, 0x9e, 0x8b, 0x3b, 0x27, 0xbf, 0xb8, 0x24, 0xbe, 0x20,
+	0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, 0x48, 0x9e, 0x8b, 0x3b, 0x27, 0xbf, 0xb8, 0x24, 0xbe, 0x20,
 	0x33, 0x35, 0x39, 0xb5, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0x35, 0x88, 0x0b, 0x24, 0x14, 0x00,
 	0x16, 0x11, 0xf2, 0xe4, 0xe2, 0xcd, 0xcc, 0x2b, 0x4e, 0x2d, 0x2a, 0x49, 0x4d, 0x89, 0x07, 0xd9,
 	0x21, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa5, 0x07, 0x71, 0x80, 0x1e, 0xcc, 0x01, 0x7a,
 	0x21, 0x30, 0x07, 0x38, 0x71, 0x9c, 0xb8, 0x27, 0xcf, 0x30, 0xe1, 0xbe, 0x3c, 0x63, 0x10, 0x0f,
 	0x4c, 0x2b, 0x48, 0xd2, 0x89, 0x25, 0x8a, 0xa9, 0x20, 0x29, 0x89, 0x0d, 0xac, 0xc3, 0x18, 0x10,
-	0x00, 0x00, 0xff, 0xff, 0xca, 0x5a, 0x32, 0x32, 0xe8, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0xcc, 0x1f, 0xe8, 0x00, 0x00, 0x00,
 }
@@ -10,7 +10,7 @@ package repair;

 // InjuredSegment is the queue item used for the data repair queue
 message InjuredSegment {
-    string path = 1;
+    bytes path = 1;
     repeated int32 lost_pieces = 2;
     google.protobuf.Timestamp inserted_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
 }
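In proto3, a `string` field must hold valid UTF-8, while `bytes` may carry arbitrary octets; since encoded path components can now contain bytes like `\x01` and `\xfe`, `bytes` is the only safe wire type here. The regenerated Go getter changes shape accordingly; a hand-written stand-in to show the semantics (the real type lives in the generated package above):

package main

import "fmt"

// InjuredSegment here is a stand-in for the regenerated message type;
// proto3 `bytes` maps to []byte in Go, with nil as the zero value.
type InjuredSegment struct {
	Path []byte
}

// GetPath mirrors the generated getter: safe on a nil receiver.
func (m *InjuredSegment) GetPath() []byte {
	if m != nil {
		return m.Path
	}
	return nil
}

func main() {
	var m *InjuredSegment
	fmt.Println(m.GetPath() == nil) // true: nil message, nil default
}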
@@ -162,7 +162,7 @@
         {
           "id": 1,
           "name": "path",
-          "type": "string"
+          "type": "bytes"
         },
         {
           "id": 2,
@@ -185,7 +185,7 @@ read limitoffset (
 model injuredsegment (
 	key path

-	field path text
+	field path blob
 	field data blob
 	field attempted utimestamp (updatable, nullable)
 )
@@ -335,7 +335,7 @@ CREATE TABLE certRecords (
 	PRIMARY KEY ( id )
 );
 CREATE TABLE injuredsegments (
-	path text NOT NULL,
+	path bytea NOT NULL,
 	data bytea NOT NULL,
 	attempted timestamp,
 	PRIMARY KEY ( path )
@@ -676,7 +676,7 @@ CREATE TABLE certRecords (
 	PRIMARY KEY ( id )
 );
 CREATE TABLE injuredsegments (
-	path TEXT NOT NULL,
+	path BLOB NOT NULL,
 	data BLOB NOT NULL,
 	attempted TIMESTAMP,
 	PRIMARY KEY ( path )
@@ -1869,7 +1869,7 @@ func (f CertRecord_UpdateAt_Field) value() interface{} {
 func (CertRecord_UpdateAt_Field) _Column() string { return "update_at" }

 type Injuredsegment struct {
-	Path      string
+	Path      []byte
 	Data      []byte
 	Attempted *time.Time
 }
@@ -1887,10 +1887,10 @@ type Injuredsegment_Update_Fields struct {
 type Injuredsegment_Path_Field struct {
 	_set   bool
 	_null  bool
-	_value string
+	_value []byte
 }

-func Injuredsegment_Path(v string) Injuredsegment_Path_Field {
+func Injuredsegment_Path(v []byte) Injuredsegment_Path_Field {
 	return Injuredsegment_Path_Field{_set: true, _value: v}
 }
@@ -62,7 +62,7 @@ CREATE TABLE certRecords (
 	PRIMARY KEY ( id )
 );
 CREATE TABLE injuredsegments (
-	path text NOT NULL,
+	path bytea NOT NULL,
 	data bytea NOT NULL,
 	attempted timestamp,
 	PRIMARY KEY ( path )
@@ -62,7 +62,7 @@ CREATE TABLE certRecords (
 	PRIMARY KEY ( id )
 );
 CREATE TABLE injuredsegments (
-	path TEXT NOT NULL,
+	path BLOB NOT NULL,
 	data BLOB NOT NULL,
 	attempted TIMESTAMP,
 	PRIMARY KEY ( path )
@@ -943,6 +943,18 @@ func (db *DB) PostgresMigration() *migrate.Migration {
 				);`,
 			},
 		},
+		{
+			Description: "Move InjuredSegment path from string to bytes",
+			Version:     41,
+			Action: migrate.SQL{
+				`ALTER TABLE injuredsegments RENAME COLUMN path TO path_old;`,
+				`ALTER TABLE injuredsegments ADD COLUMN path bytea;`,
+				`UPDATE injuredsegments SET path = decode(path_old, 'escape');`,
+				`ALTER TABLE injuredsegments ALTER COLUMN path SET NOT NULL;`,
+				`ALTER TABLE injuredsegments DROP COLUMN path_old;`,
+				`ALTER TABLE injuredsegments ADD CONSTRAINT injuredsegments_pk PRIMARY KEY (path);`,
+			},
+		},
 	}
 }
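The data move relies on PostgreSQL's `decode(path_old, 'escape')`, which reinterprets a text value as raw bytes: `\\` yields a backslash, a backslash followed by three octal digits yields one byte, and every other character is copied through unchanged, so ordinary text paths map to their own bytes. A rough Go illustration of that mapping (my own sketch, not code from this change):

package main

import (
	"fmt"
	"strconv"
)

// decodeEscape approximates PostgreSQL's decode(text, 'escape').
func decodeEscape(s string) ([]byte, error) {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if s[i] != '\\' {
			out = append(out, s[i])
			continue
		}
		if i+1 < len(s) && s[i+1] == '\\' {
			out = append(out, '\\')
			i++
			continue
		}
		if i+3 < len(s) {
			// \nnn: three octal digits make one byte
			n, err := strconv.ParseUint(s[i+1:i+4], 8, 8)
			if err != nil {
				return nil, err
			}
			out = append(out, byte(n))
			i += 3
			continue
		}
		return nil, fmt.Errorf("truncated escape at %d", i)
	}
	return out, nil
}

func main() {
	b, err := decodeEscape(`here's/a/great/path`)
	fmt.Printf("%q %v\n", b, err) // plain text maps to its own bytes
}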
@@ -51,7 +51,7 @@ func (r *repairQueue) postgresSelect(ctx context.Context) (seg *pb.InjuredSegmen
 func (r *repairQueue) sqliteSelect(ctx context.Context) (seg *pb.InjuredSegment, err error) {
 	defer mon.Task()(&ctx)(&err)
 	err = r.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
-		var path string
+		var path []byte
 		err = tx.Tx.QueryRowContext(ctx, r.db.Rebind(`
 			SELECT path, data FROM injuredsegments
 			WHERE attempted IS NULL
satellite/satellitedb/testdata/postgres.v41.sql (vendored, new file, 343 lines)
@@ -0,0 +1,343 @@
+-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
+-- DO NOT EDIT
+CREATE TABLE accounting_rollups (
+	id bigserial NOT NULL,
+	node_id bytea NOT NULL,
+	start_time timestamp with time zone NOT NULL,
+	put_total bigint NOT NULL,
+	get_total bigint NOT NULL,
+	get_audit_total bigint NOT NULL,
+	get_repair_total bigint NOT NULL,
+	put_repair_total bigint NOT NULL,
+	at_rest_total double precision NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE accounting_timestamps (
+	name text NOT NULL,
+	value timestamp with time zone NOT NULL,
+	PRIMARY KEY ( name )
+);
+CREATE TABLE bucket_bandwidth_rollups (
+	bucket_name bytea NOT NULL,
+	project_id bytea NOT NULL,
+	interval_start timestamp NOT NULL,
+	interval_seconds integer NOT NULL,
+	action integer NOT NULL,
+	inline bigint NOT NULL,
+	allocated bigint NOT NULL,
+	settled bigint NOT NULL,
+	PRIMARY KEY ( bucket_name, project_id, interval_start, action )
+);
+CREATE TABLE bucket_storage_tallies (
+	bucket_name bytea NOT NULL,
+	project_id bytea NOT NULL,
+	interval_start timestamp NOT NULL,
+	inline bigint NOT NULL,
+	remote bigint NOT NULL,
+	remote_segments_count integer NOT NULL,
+	inline_segments_count integer NOT NULL,
+	object_count integer NOT NULL,
+	metadata_size bigint NOT NULL,
+	PRIMARY KEY ( bucket_name, project_id, interval_start )
+);
+CREATE TABLE bucket_usages (
+	id bytea NOT NULL,
+	bucket_id bytea NOT NULL,
+	rollup_end_time timestamp with time zone NOT NULL,
+	remote_stored_data bigint NOT NULL,
+	inline_stored_data bigint NOT NULL,
+	remote_segments integer NOT NULL,
+	inline_segments integer NOT NULL,
+	objects integer NOT NULL,
+	metadata_size bigint NOT NULL,
+	repair_egress bigint NOT NULL,
+	get_egress bigint NOT NULL,
+	audit_egress bigint NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE certRecords (
+	publickey bytea NOT NULL,
+	id bytea NOT NULL,
+	update_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE injuredsegments (
+	path bytea NOT NULL,
+	data bytea NOT NULL,
+	attempted timestamp,
+	PRIMARY KEY ( path )
+);
+CREATE TABLE irreparabledbs (
+	segmentpath bytea NOT NULL,
+	segmentdetail bytea NOT NULL,
+	pieces_lost_count bigint NOT NULL,
+	seg_damaged_unix_sec bigint NOT NULL,
+	repair_attempt_count bigint NOT NULL,
+	PRIMARY KEY ( segmentpath )
+);
+CREATE TABLE nodes (
+	id bytea NOT NULL,
+	address text NOT NULL,
+	last_net text NOT NULL,
+	protocol integer NOT NULL,
+	type integer NOT NULL,
+	email text NOT NULL,
+	wallet text NOT NULL,
+	free_bandwidth bigint NOT NULL,
+	free_disk bigint NOT NULL,
+	major bigint NOT NULL,
+	minor bigint NOT NULL,
+	patch bigint NOT NULL,
+	hash text NOT NULL,
+	timestamp timestamp with time zone NOT NULL,
+	release boolean NOT NULL,
+	latency_90 bigint NOT NULL,
+	audit_success_count bigint NOT NULL,
+	total_audit_count bigint NOT NULL,
+	uptime_success_count bigint NOT NULL,
+	total_uptime_count bigint NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	updated_at timestamp with time zone NOT NULL,
+	last_contact_success timestamp with time zone NOT NULL,
+	last_contact_failure timestamp with time zone NOT NULL,
+	contained boolean NOT NULL,
+	disqualified timestamp with time zone,
+	audit_reputation_alpha double precision NOT NULL,
+	audit_reputation_beta double precision NOT NULL,
+	uptime_reputation_alpha double precision NOT NULL,
+	uptime_reputation_beta double precision NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE offers (
+	id serial NOT NULL,
+	name text NOT NULL,
+	description text NOT NULL,
+	award_credit_in_cents integer NOT NULL,
+	invitee_credit_in_cents integer NOT NULL,
+	award_credit_duration_days integer NOT NULL,
+	invitee_credit_duration_days integer NOT NULL,
+	redeemable_cap integer NOT NULL,
+	num_redeemed integer NOT NULL,
+	expires_at timestamp with time zone NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	status integer NOT NULL,
+	type integer NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE pending_audits (
+	node_id bytea NOT NULL,
+	piece_id bytea NOT NULL,
+	stripe_index bigint NOT NULL,
+	share_size bigint NOT NULL,
+	expected_share_hash bytea NOT NULL,
+	reverify_count bigint NOT NULL,
+	PRIMARY KEY ( node_id )
+);
+CREATE TABLE projects (
+	id bytea NOT NULL,
+	name text NOT NULL,
+	description text NOT NULL,
+	usage_limit bigint NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE registration_tokens (
+	secret bytea NOT NULL,
+	owner_id bytea,
+	project_limit integer NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( secret ),
+	UNIQUE ( owner_id )
+);
+CREATE TABLE reset_password_tokens (
+	secret bytea NOT NULL,
+	owner_id bytea NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( secret ),
+	UNIQUE ( owner_id )
+);
+CREATE TABLE serial_numbers (
+	id serial NOT NULL,
+	serial_number bytea NOT NULL,
+	bucket_id bytea NOT NULL,
+	expires_at timestamp NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE storagenode_bandwidth_rollups (
+	storagenode_id bytea NOT NULL,
+	interval_start timestamp NOT NULL,
+	interval_seconds integer NOT NULL,
+	action integer NOT NULL,
+	allocated bigint NOT NULL,
+	settled bigint NOT NULL,
+	PRIMARY KEY ( storagenode_id, interval_start, action )
+);
+CREATE TABLE storagenode_storage_tallies (
+	id bigserial NOT NULL,
+	node_id bytea NOT NULL,
+	interval_end_time timestamp with time zone NOT NULL,
+	data_total double precision NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE users (
+	id bytea NOT NULL,
+	email text NOT NULL,
+	full_name text NOT NULL,
+	short_name text,
+	password_hash bytea NOT NULL,
+	status integer NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE value_attributions (
+	project_id bytea NOT NULL,
+	bucket_name bytea NOT NULL,
+	partner_id bytea NOT NULL,
+	last_updated timestamp NOT NULL,
+	PRIMARY KEY ( project_id, bucket_name )
+);
+CREATE TABLE api_keys (
+	id bytea NOT NULL,
+	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
+	head bytea NOT NULL,
+	name text NOT NULL,
+	secret bytea NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( id ),
+	UNIQUE ( head ),
+	UNIQUE ( name, project_id )
+);
+CREATE TABLE bucket_metainfos (
+	id bytea NOT NULL,
+	project_id bytea NOT NULL REFERENCES projects( id ),
+	name bytea NOT NULL,
+	path_cipher integer NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	default_segment_size integer NOT NULL,
+	default_encryption_cipher_suite integer NOT NULL,
+	default_encryption_block_size integer NOT NULL,
+	default_redundancy_algorithm integer NOT NULL,
+	default_redundancy_share_size integer NOT NULL,
+	default_redundancy_required_shares integer NOT NULL,
+	default_redundancy_repair_shares integer NOT NULL,
+	default_redundancy_optimal_shares integer NOT NULL,
+	default_redundancy_total_shares integer NOT NULL,
+	PRIMARY KEY ( id ),
+	UNIQUE ( name, project_id )
+);
+CREATE TABLE project_invoice_stamps (
+	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
+	invoice_id bytea NOT NULL,
+	start_date timestamp with time zone NOT NULL,
+	end_date timestamp with time zone NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( project_id, start_date, end_date ),
+	UNIQUE ( invoice_id )
+);
+CREATE TABLE project_members (
+	member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
+	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( member_id, project_id )
+);
+CREATE TABLE used_serials (
+	serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
+	storage_node_id bytea NOT NULL,
+	PRIMARY KEY ( serial_number_id, storage_node_id )
+);
+CREATE TABLE user_credits (
+	id serial NOT NULL,
+	user_id bytea NOT NULL REFERENCES users( id ),
+	offer_id integer NOT NULL REFERENCES offers( id ),
+	referred_by bytea REFERENCES users( id ),
+	credits_earned_in_cents integer NOT NULL,
+	credits_used_in_cents integer NOT NULL,
+	expires_at timestamp with time zone NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE TABLE user_payments (
+	user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
+	customer_id bytea NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( user_id ),
+	UNIQUE ( customer_id )
+);
+CREATE TABLE project_payments (
+	id bytea NOT NULL,
+	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
+	payer_id bytea NOT NULL REFERENCES user_payments( user_id ) ON DELETE CASCADE,
+	payment_method_id bytea NOT NULL,
+	is_default boolean NOT NULL,
+	created_at timestamp with time zone NOT NULL,
+	PRIMARY KEY ( id )
+);
+CREATE INDEX bucket_name_project_id_interval_start_interval_seconds ON bucket_bandwidth_rollups ( bucket_name, project_id, interval_start, interval_seconds );
+CREATE UNIQUE INDEX bucket_id_rollup ON bucket_usages ( bucket_id, rollup_end_time );
+CREATE INDEX node_last_ip ON nodes ( last_net );
+CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
+CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
+CREATE INDEX storagenode_id_interval_start_interval_seconds ON storagenode_bandwidth_rollups ( storagenode_id, interval_start, interval_seconds );
+
+---
+
+INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
+
+INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
+INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
+INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
+
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 5, 100, 5);
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0);
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 0, 100, 0);
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 50, 1, 100, 1);
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, 300, 100, 300, 100);
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit","created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, '2019-02-14 08:28:24.254934+00');
+
+INSERT INTO "users"("id", "full_name", "short_name", "email", "password_hash", "status", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00');
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, '2019-02-14 08:28:24.636949+00');
+INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
+
+INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
+
+INSERT INTO "injuredsegments" ("path", "data") VALUES ('0', '\x0a0130120100');
+INSERT INTO "injuredsegments" ("path", "data") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a');
+INSERT INTO "injuredsegments" ("path", "data") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a');
+INSERT INTO "injuredsegments" ("path", "data") VALUES ('so/many/iconic/paths/to/choose/from', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
+
+INSERT INTO "certrecords" VALUES (E'0Y0\\023\\006\\007*\\206H\\316=\\002\\001\\006\\010*\\206H\\316=\\003\\001\\007\\003B\\000\\004\\360\\267\\227\\377\\253u\\222\\337Y\\324C:GQ\\010\\277v\\010\\315D\\271\\333\\337.\\203\\023=C\\343\\014T%6\\027\\362?\\214\\326\\017U\\334\\000\\260\\224\\260J\\221\\304\\331F\\304\\221\\236zF,\\325\\326l\\215\\306\\365\\200\\022', E'L\\301|\\200\\247}F|1\\320\\232\\037n\\335\\241\\206\\244\\242\\207\\204.\\253\\357\\326\\352\\033Dt\\202`\\022\\325', '2019-02-14 08:07:31.335028+00');
+
+INSERT INTO "bucket_usages" ("id", "bucket_id", "rollup_end_time", "remote_stored_data", "inline_stored_data", "remote_segments", "inline_segments", "objects", "metadata_size", "repair_egress", "get_egress", "audit_egress") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001",'::bytea, E'\\366\\146\\032\\321\\316\\161\\070\\133\\302\\271",'::bytea, '2019-03-06 08:28:24.677953+00', 10, 11, 12, 13, 14, 15, 16, 17, 18);
+
+INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
+
+INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
+INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
+
+INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024);
+INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
+
+INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
+INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
+INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 3600, 1, 1024, 2024, 3024);
+INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000+00', 4024, 5024, 0, 0, 0, 0);
+
+INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
+
+INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1);
+
+INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "award_credit_duration_days", "invitee_credit_duration_days", "redeemable_cap", "expires_at", "created_at", "num_redeemed", "status", "type") VALUES (1, 'testOffer', 'Test offer 1', 0, 0, 14, 14, 50, '2019-03-14 08:28:24.636949+00', '2019-02-14 08:28:24.636949+00', 0, 0, 0);
+
+INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, '2019-02-14 08:28:24.267934+00');
+
+INSERT INTO "user_payments" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, '2019-06-01 08:28:24.267934+00');
+INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
+
+INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
+
+INSERT INTO "project_payments" ("id", "project_id", "payer_id", "payment_method_id", "is_default","created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276'::bytea, true, '2019-06-01 08:28:24.267934+00');