// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information

package piecestore_test

import (
	"crypto/tls"
	"crypto/x509"
	"io"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"

	"storj.io/storj/internal/errs2"
	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/bloomfilter"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/pkcrypto"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/bandwidth"
	"storj.io/storj/storagenode/pieces"
	ps "storj.io/storj/storagenode/piecestore"
	"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
	"storj.io/storj/storagenode/trust"
	"storj.io/storj/uplink/piecestore"
)
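
// TestUploadAndPartialDownload uploads a single object through the uplink,
// downloads several byte ranges from it, and verifies both the returned data
// and that the recorded bandwidth usage stays within rough expected bounds.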
func TestUploadAndPartialDownload(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		expectedData := testrand.Bytes(100 * memory.KiB)

		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", expectedData)
		assert.NoError(t, err)

		var totalDownload int64
		for _, tt := range []struct {
			offset, size int64
		}{
			{0, 1510},
			{1513, 1584},
			{13581, 4783},
		} {
			if piecestore.DefaultConfig.InitialStep < tt.size {
				t.Fatal("test expects initial step to be larger than size to download")
			}
			totalDownload += piecestore.DefaultConfig.InitialStep

			download, cleanup, err := planet.Uplinks[0].DownloadStream(ctx, planet.Satellites[0], "testbucket", "test/path")
			require.NoError(t, err)
			defer ctx.Check(cleanup)

			pos, err := download.Seek(tt.offset, io.SeekStart)
			require.NoError(t, err)
			assert.Equal(t, pos, tt.offset)

			data := make([]byte, tt.size)
			n, err := io.ReadFull(download, data)
			require.NoError(t, err)
			assert.Equal(t, int(tt.size), n)

			assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data)

			require.NoError(t, download.Close())
		}

		var totalBandwidthUsage bandwidth.Usage
		for _, storagenode := range planet.StorageNodes {
			usage, err := storagenode.DB.Bandwidth().Summary(ctx, time.Now().Add(-10*time.Hour), time.Now().Add(10*time.Hour))
			require.NoError(t, err)
			totalBandwidthUsage.Add(usage)
		}

		err = planet.Uplinks[0].Delete(ctx, planet.Satellites[0], "testbucket", "test/path")
		require.NoError(t, err)
		_, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "test/path")
		require.Error(t, err)

		// check rough limits for the upload and download
		totalUpload := int64(len(expectedData))
		t.Log(totalUpload, totalBandwidthUsage.Put, int64(len(planet.StorageNodes))*totalUpload)
		assert.True(t, totalUpload < totalBandwidthUsage.Put && totalBandwidthUsage.Put < int64(len(planet.StorageNodes))*totalUpload)
		t.Log(totalDownload, totalBandwidthUsage.Get, int64(len(planet.StorageNodes))*totalDownload)
		assert.True(t, totalBandwidthUsage.Get < int64(len(planet.StorageNodes))*totalDownload)
	})
}
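
// TestUpload exercises the storage node upload endpoint directly: it stores a
// piece with a valid order limit and verifies the signed piece hash, and it
// checks that a missing piece ID or a non-put action is rejected.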
func TestUpload(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 1, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
	require.NoError(t, err)
	defer ctx.Check(client.Close)

	for _, tt := range []struct {
		pieceID       storj.PieceID
		contentLength memory.Size
		action        pb.PieceAction
		err           string
	}{
		{ // should successfully store data
			pieceID:       storj.PieceID{1},
			contentLength: 50 * memory.KiB,
			action:        pb.PieceAction_PUT,
			err:           "",
		},
		{ // should err with piece ID not specified
			pieceID:       storj.PieceID{},
			contentLength: 1 * memory.KiB,
			action:        pb.PieceAction_PUT,
			err:           "missing piece id",
		},
		{ // should err because of invalid action
			pieceID:       storj.PieceID{1},
			contentLength: 1 * memory.KiB,
			action:        pb.PieceAction_GET,
			err:           "expected put or put repair action got GET",
		},
	} {
		data := testrand.Bytes(tt.contentLength)
		expectedHash := pkcrypto.SHA256Hash(data)
		serialNumber := testrand.SerialNumber()

		orderLimit, piecePrivateKey := GenerateOrderLimit(
			t,
			planet.Satellites[0].ID(),
			planet.StorageNodes[0].ID(),
			tt.pieceID,
			tt.action,
			serialNumber,
			24*time.Hour,
			24*time.Hour,
			int64(len(data)),
		)
		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
		orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
		require.NoError(t, err)

		uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
		require.NoError(t, err)

		_, err = uploader.Write(data)
		require.NoError(t, err)

		pieceHash, err := uploader.Commit(ctx)
		if tt.err != "" {
			require.Error(t, err)
			require.Contains(t, err.Error(), tt.err)
		} else {
			require.NoError(t, err)

			assert.Equal(t, expectedHash, pieceHash.Hash)

			signee := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
			require.NoError(t, signing.VerifyPieceHashSignature(ctx, signee, pieceHash))
		}
	}
}
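
// TestDownload uploads a test piece and then downloads it with different
// order limits: a valid get succeeds, a nonexistent piece and an invalid
// action are rejected.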
func TestDownload(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 1, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	// upload test piece
	client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
	require.NoError(t, err)
	defer ctx.Check(client.Close)

	expectedData := testrand.Bytes(10 * memory.KiB)
	serialNumber := testrand.SerialNumber()

	orderLimit, piecePrivateKey := GenerateOrderLimit(
		t,
		planet.Satellites[0].ID(),
		planet.StorageNodes[0].ID(),
		storj.PieceID{1},
		pb.PieceAction_PUT,
		serialNumber,
		24*time.Hour,
		24*time.Hour,
		int64(len(expectedData)),
	)
	signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
	orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
	require.NoError(t, err)

	uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
	require.NoError(t, err)

	_, err = uploader.Write(expectedData)
	require.NoError(t, err)

	_, err = uploader.Commit(ctx)
	require.NoError(t, err)

	for _, tt := range []struct {
		pieceID storj.PieceID
		action  pb.PieceAction
		errs    []string
	}{
		{ // should successfully download data
			pieceID: orderLimit.PieceId,
			action:  pb.PieceAction_GET,
		},
		{ // should err for a piece that does not exist
			pieceID: storj.PieceID{2},
			action:  pb.PieceAction_GET,
			errs:    []string{"no such file or directory", "The system cannot find the path specified"},
		},
		{ // should err because of invalid action
			pieceID: orderLimit.PieceId,
			action:  pb.PieceAction_PUT,
			errs:    []string{"expected get or get repair or audit action got PUT"},
		},
	} {
		serialNumber := testrand.SerialNumber()

		orderLimit, piecePrivateKey := GenerateOrderLimit(
			t,
			planet.Satellites[0].ID(),
			planet.StorageNodes[0].ID(),
			tt.pieceID,
			tt.action,
			serialNumber,
			24*time.Hour,
			24*time.Hour,
			int64(len(expectedData)),
		)
		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
		orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
		require.NoError(t, err)

		downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, int64(len(expectedData)))
		require.NoError(t, err)

		buffer := make([]byte, len(expectedData))
		n, err := downloader.Read(buffer)

		// for the error cases the failure may only surface when closing the downloader
		if len(tt.errs) == 0 {
			require.NoError(t, err)
			require.Equal(t, expectedData, buffer[:n])
		}

		err = downloader.Close()
		if len(tt.errs) > 0 {
			require.Error(t, err)
			// the message differs by platform, so accept any of the expected errors
			matched := false
			for _, expected := range tt.errs {
				if strings.Contains(err.Error(), expected) {
					matched = true
					break
				}
			}
			require.True(t, matched, err.Error())
		} else {
			require.NoError(t, err)
		}
	}
}
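
// TestDelete uploads a piece and then exercises the delete endpoint: deleting
// the piece with a valid order limit succeeds, deleting an unknown piece does
// not error, and a non-delete action is rejected.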
func TestDelete(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 1, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	// upload test piece
	client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
	require.NoError(t, err)
	defer ctx.Check(client.Close)

	expectedData := testrand.Bytes(10 * memory.KiB)
	serialNumber := testrand.SerialNumber()

	orderLimit, piecePrivateKey := GenerateOrderLimit(
		t,
		planet.Satellites[0].ID(),
		planet.StorageNodes[0].ID(),
		storj.PieceID{1},
		pb.PieceAction_PUT,
		serialNumber,
		24*time.Hour,
		24*time.Hour,
		int64(len(expectedData)),
	)
	signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
	orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
	require.NoError(t, err)

	uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
	require.NoError(t, err)

	_, err = uploader.Write(expectedData)
	require.NoError(t, err)

	_, err = uploader.Commit(ctx)
	require.NoError(t, err)

	for _, tt := range []struct {
		pieceID storj.PieceID
		action  pb.PieceAction
		err     string
	}{
		{ // should successfully delete data
			pieceID: orderLimit.PieceId,
			action:  pb.PieceAction_DELETE,
			err:     "",
		},
		{ // should not err for a piece that does not exist
			pieceID: storj.PieceID{99},
			action:  pb.PieceAction_DELETE,
			err:     "", // TODO should this return error
		},
		{ // should err because of invalid action
			pieceID: orderLimit.PieceId,
			action:  pb.PieceAction_GET,
			err:     "expected delete action got GET",
		},
	} {
		serialNumber := testrand.SerialNumber()

		orderLimit, piecePrivateKey := GenerateOrderLimit(
			t,
			planet.Satellites[0].ID(),
			planet.StorageNodes[0].ID(),
			tt.pieceID,
			tt.action,
			serialNumber,
			24*time.Hour,
			24*time.Hour,
			100,
		)
		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
		orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
		require.NoError(t, err)

		err := client.Delete(ctx, orderLimit, piecePrivateKey)
		if tt.err != "" {
			require.Error(t, err)
			require.Contains(t, err.Error(), tt.err)
		} else {
			require.NoError(t, err)
		}
	}
}
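
// TestTooManyRequests starts more concurrent uploads than the storage node is
// configured to accept (MaxConcurrentRequests) and treats an Unavailable RPC
// status as the expected rejection for the excess requests.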
func TestTooManyRequests(t *testing.T) {
	t.Skip("flaky, because of EOF issues")

	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	const uplinkCount = 6
	const maxConcurrent = 3
	const expectedFailures = uplinkCount - maxConcurrent

	log := zaptest.NewLogger(t)

	planet, err := testplanet.NewCustom(log, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: uplinkCount,
		Reconfigure: testplanet.Reconfigure{
			StorageNode: func(index int, config *storagenode.Config) {
				config.Storage2.MaxConcurrentRequests = maxConcurrent
			},
		},
	})
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	doneWaiting := make(chan struct{})
	failedCount := int64(expectedFailures)

	uploads, _ := errgroup.WithContext(ctx)
	defer ctx.Check(uploads.Wait)

	for i, uplink := range planet.Uplinks {
		i, uplink := i, uplink
		uploads.Go(func() (err error) {
			storageNode := planet.StorageNodes[0].Local()
			config := piecestore.DefaultConfig
			config.UploadBufferSize = 0 // disable buffering so we can detect write error early

			client, err := piecestore.Dial(ctx, uplink.Transport, &storageNode.Node, uplink.Log, config)
			if err != nil {
				return err
			}
			defer func() {
				if cerr := client.Close(); cerr != nil {
					uplink.Log.Error("close failed", zap.Error(cerr))
					err = errs.Combine(err, cerr)
				}
			}()

			pieceID := storj.PieceID{byte(i + 1)}
			serialNumber := testrand.SerialNumber()

			orderLimit, piecePrivateKey := GenerateOrderLimit(
				t,
				planet.Satellites[0].ID(),
				planet.StorageNodes[0].ID(),
				pieceID,
				pb.PieceAction_PUT,
				serialNumber,
				24*time.Hour,
				24*time.Hour,
				int64(10000),
			)

			satelliteSigner := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
			orderLimit, err = signing.SignOrderLimit(ctx, satelliteSigner, orderLimit)
			if err != nil {
				return err
			}

			upload, err := client.Upload(ctx, orderLimit, piecePrivateKey)
			if err != nil {
				if errs2.IsRPC(err, codes.Unavailable) {
					if atomic.AddInt64(&failedCount, -1) == 0 {
						close(doneWaiting)
					}
					return nil
				}
				uplink.Log.Error("upload failed", zap.Stringer("Piece ID", pieceID), zap.Error(err))
				return err
			}

			_, err = upload.Write(make([]byte, orderLimit.Limit))
			if err != nil {
				if errs2.IsRPC(err, codes.Unavailable) {
					if atomic.AddInt64(&failedCount, -1) == 0 {
						close(doneWaiting)
					}
					return nil
				}
				uplink.Log.Error("write failed", zap.Stringer("Piece ID", pieceID), zap.Error(err))
				return err
			}

			_, err = upload.Commit(ctx)
			if err != nil {
				if errs2.IsRPC(err, codes.Unavailable) {
					if atomic.AddInt64(&failedCount, -1) == 0 {
						close(doneWaiting)
					}
					return nil
				}
				uplink.Log.Error("commit failed", zap.Stringer("Piece ID", pieceID), zap.Error(err))
				return err
			}

			return nil
		})
	}
}
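
// GenerateOrderLimit creates a new piece key pair and returns an unsigned
// order limit for the given satellite, storage node, piece and action,
// together with the piece private key.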
func GenerateOrderLimit(t *testing.T, satellite storj.NodeID, storageNode storj.NodeID, pieceID storj.PieceID, action pb.PieceAction, serialNumber storj.SerialNumber, pieceExpiration, orderExpiration time.Duration, limit int64) (*pb.OrderLimit, storj.PiecePrivateKey) {
	piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
	require.NoError(t, err)

	now := time.Now()
	return &pb.OrderLimit{
		SatelliteId:     satellite,
		UplinkPublicKey: piecePublicKey,
		StorageNodeId:   storageNode,
		PieceId:         pieceID,
		Action:          action,
		SerialNumber:    serialNumber,
		OrderCreation:   time.Now(),
		OrderExpiration: now.Add(orderExpiration),
		PieceExpiration: now.Add(pieceExpiration),
		Limit:           limit,
	}, piecePrivateKey
}
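
// TestRetain fills the piece info DB for two satellites, sends a Retain
// request from one of them with a bloom filter covering most pieces, and then
// verifies that pieces in the filter and recently created pieces survive,
// while the other satellite's pieces are left untouched.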
func TestRetain(t *testing.T) {
	storagenodedbtest.Run(t, func(t *testing.T, db storagenode.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()
		pieceInfos := db.PieceInfo()
		store := pieces.NewStore(zaptest.NewLogger(t), db.Pieces())

		const numPieces = 1000
		const numPiecesToKeep = 990
		// pieces from numPiecesToKeep + numOldPieces to numPieces will
		// have a recent timestamp and thus should not be deleted
		const numOldPieces = 5

		filter := bloomfilter.NewOptimal(numPiecesToKeep, 0.1)

		pieceIDs := generateTestIDs(numPieces)

		satellite0 := testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion())
		satellite1 := testidentity.MustPregeneratedSignedIdentity(2, storj.LatestIDVersion())

		whitelisted := storj.NodeURLs{
			storj.NodeURL{ID: satellite0.ID},
			storj.NodeURL{ID: satellite1.ID},
		}

		trusted, err := trust.NewPool(nil, whitelisted)
		require.NoError(t, err)

		uplink := testidentity.MustPregeneratedSignedIdentity(3, storj.LatestIDVersion())
		endpoint, err := ps.NewEndpoint(zaptest.NewLogger(t), nil, trusted, nil, store, pieceInfos, nil, nil, nil, ps.Config{})
		require.NoError(t, err)

		recentTime := time.Now()
		oldTime := recentTime.Add(-time.Duration(48) * time.Hour)

		var pieceCreation time.Time
		// add all pieces to the node pieces info DB - but only count piece ids in filter
		for index, id := range pieceIDs {
			if index < numPiecesToKeep {
				filter.Add(id)
			}

			if index < numPiecesToKeep+numOldPieces {
				pieceCreation = oldTime
			} else {
				pieceCreation = recentTime
			}

			piecehash0, err := signing.SignPieceHash(ctx,
				signing.SignerFromFullIdentity(uplink),
				&pb.PieceHash{
					PieceId: id,
					Hash:    []byte{0, 2, 3, 4, 5},
				})
			require.NoError(t, err)

			piecehash1, err := signing.SignPieceHash(ctx,
				signing.SignerFromFullIdentity(uplink),
				&pb.PieceHash{
					PieceId: id,
					Hash:    []byte{0, 2, 3, 4, 5},
				})
			require.NoError(t, err)

			pieceinfo0 := pieces.Info{
				SatelliteID:     satellite0.ID,
				PieceSize:       4,
				PieceID:         id,
				PieceCreation:   pieceCreation,
				UplinkPieceHash: piecehash0,
				OrderLimit:      &pb.OrderLimit{},
			}
			pieceinfo1 := pieces.Info{
				SatelliteID:     satellite1.ID,
				PieceSize:       4,
				PieceID:         id,
				PieceCreation:   pieceCreation,
				UplinkPieceHash: piecehash1,
				OrderLimit:      &pb.OrderLimit{},
			}

			err = pieceInfos.Add(ctx, &pieceinfo0)
			require.NoError(t, err)

			err = pieceInfos.Add(ctx, &pieceinfo1)
			require.NoError(t, err)
		}

		ctxSatellite0 := peer.NewContext(ctx, &peer.Peer{
			AuthInfo: credentials.TLSInfo{
				State: tls.ConnectionState{
					PeerCertificates: []*x509.Certificate{satellite0.PeerIdentity().Leaf, satellite0.PeerIdentity().CA},
				},
			},
		})

		retainReq := pb.RetainRequest{}
		retainReq.Filter = filter.Bytes()
		retainReq.CreationDate = recentTime

		_, err = endpoint.Retain(ctxSatellite0, &retainReq)
		require.NoError(t, err)

		// check we have deleted nothing for satellite1
		satellite1Pieces, err := pieceInfos.GetPieceIDs(ctx, satellite1.ID, recentTime.Add(time.Duration(5)*time.Second), numPieces, 0)
		require.NoError(t, err)
		require.Equal(t, numPieces, len(satellite1Pieces))

		// check that for satellite0 we did not delete pieces that are in the
		// filter or that were created recently
		satellite0Pieces, err := pieceInfos.GetPieceIDs(ctx, satellite0.ID, recentTime.Add(time.Duration(5)*time.Second), numPieces, 0)
		require.NoError(t, err)

		for _, id := range pieceIDs[:numPiecesToKeep] {
			require.Contains(t, satellite0Pieces, id, "piece should not have been deleted (it is in the bloom filter)")
		}

		for _, id := range pieceIDs[numPiecesToKeep+numOldPieces:] {
			require.Contains(t, satellite0Pieces, id, "piece should not have been deleted (recent piece)")
		}
	})
}

// generateTestIDs generates n piece ids
func generateTestIDs(n int) []storj.PieceID {
	ids := make([]storj.PieceID, n)
	for i := range ids {
		ids[i] = testrand.PieceID()
	}
	return ids
}