2019-03-18 10:55:06 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information
|
|
|
|
|
|
|
|
package piecestore_test
|
|
|
|
|
|
|
|
import (
|
|
|
|
"io"
|
2019-04-04 17:56:42 +01:00
|
|
|
"strings"
|
2019-07-03 14:47:55 +01:00
|
|
|
"sync/atomic"
|
2019-03-18 10:55:06 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2019-07-03 14:47:55 +01:00
|
|
|
"github.com/zeebo/errs"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
"golang.org/x/sync/errgroup"
|
2019-03-18 10:55:06 +00:00
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/errs2"
|
|
|
|
"storj.io/common/memory"
|
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/pkcrypto"
|
|
|
|
"storj.io/common/rpc/rpcstatus"
|
|
|
|
"storj.io/common/signing"
|
|
|
|
"storj.io/common/storj"
|
|
|
|
"storj.io/common/testcontext"
|
|
|
|
"storj.io/common/testrand"
|
2020-05-22 19:12:26 +01:00
|
|
|
"storj.io/storj/private/testblobs"
|
2019-11-14 19:46:15 +00:00
|
|
|
"storj.io/storj/private/testplanet"
|
2019-07-03 14:47:55 +01:00
|
|
|
"storj.io/storj/storagenode"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/storagenode/bandwidth"
|
2020-02-21 14:07:29 +00:00
|
|
|
"storj.io/uplink/private/piecestore"
|
2019-03-18 10:55:06 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// TestUploadAndPartialDownload uploads a single object through the uplink and
// then reads several disjoint byte ranges back, verifying the returned bytes
// and the bandwidth accounting recorded by the storage nodes.
func TestUploadAndPartialDownload(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		expectedData := testrand.Bytes(100 * memory.KiB)

		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", expectedData)
		assert.NoError(t, err)

		var totalDownload int64
		for _, tt := range []struct {
			offset, size int64
		}{
			{0, 1510},
			{1513, 1584},
			{13581, 4783},
		} {
			// Each range must fit inside the client's first request window, so
			// exactly one InitialStep of bandwidth is ordered per iteration.
			if piecestore.DefaultConfig.InitialStep < tt.size {
				t.Fatal("test expects initial step to be larger than size to download")
			}
			totalDownload += piecestore.DefaultConfig.InitialStep

			download, cleanup, err := planet.Uplinks[0].DownloadStreamRange(ctx, planet.Satellites[0], "testbucket", "test/path", tt.offset, -1)
			require.NoError(t, err)
			// NOTE(review): defer inside the loop — every cleanup stays pending
			// until the whole test callback returns, not per iteration.
			defer ctx.Check(cleanup)

			data := make([]byte, tt.size)
			n, err := io.ReadFull(download, data)
			require.NoError(t, err)
			assert.Equal(t, int(tt.size), n)

			assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data)

			require.NoError(t, download.Close())
		}

		// Sum bandwidth usage across all storage nodes over a window that
		// safely covers the test run.
		var totalBandwidthUsage bandwidth.Usage
		for _, storagenode := range planet.StorageNodes {
			usage, err := storagenode.DB.Bandwidth().Summary(ctx, time.Now().Add(-10*time.Hour), time.Now().Add(10*time.Hour))
			require.NoError(t, err)
			totalBandwidthUsage.Add(usage)
		}

		err = planet.Uplinks[0].DeleteObject(ctx, planet.Satellites[0], "testbucket", "test/path")
		require.NoError(t, err)
		// After deletion the object must no longer be downloadable.
		_, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "test/path")
		require.Error(t, err)

		// check rough limits for the upload and download
		totalUpload := int64(len(expectedData))
		t.Log(totalUpload, totalBandwidthUsage.Put, int64(len(planet.StorageNodes))*totalUpload)
		assert.True(t, totalUpload < totalBandwidthUsage.Put && totalBandwidthUsage.Put < int64(len(planet.StorageNodes))*totalUpload)
		t.Log(totalDownload, totalBandwidthUsage.Get, int64(len(planet.StorageNodes))*totalDownload)
		assert.True(t, totalBandwidthUsage.Get < int64(len(planet.StorageNodes))*totalDownload)
	})
}
|
2019-03-20 21:12:00 +00:00
|
|
|
|
|
|
|
// TestUpload exercises the piecestore Upload RPC against a single storage
// node: a successful store, a rejected order limit with a missing piece ID,
// and a rejected order limit with a non-PUT action.
func TestUpload(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
		require.NoError(t, err)
		defer ctx.Check(client.Close)

		for _, tt := range []struct {
			pieceID       storj.PieceID
			contentLength memory.Size
			action        pb.PieceAction
			err           string
		}{
			{ // should successfully store data
				pieceID:       storj.PieceID{1},
				contentLength: 50 * memory.KiB,
				action:        pb.PieceAction_PUT,
				err:           "",
			},
			{ // should err with piece ID not specified
				pieceID:       storj.PieceID{},
				contentLength: 1 * memory.KiB,
				action:        pb.PieceAction_PUT,
				err:           "missing piece id",
			},
			{ // should err because invalid action
				pieceID:       storj.PieceID{1},
				contentLength: 1 * memory.KiB,
				action:        pb.PieceAction_GET,
				err:           "expected put or put repair action got GET",
			},
		} {
			data := testrand.Bytes(tt.contentLength)
			serialNumber := testrand.SerialNumber()

			orderLimit, piecePrivateKey := GenerateOrderLimit(
				t,
				planet.Satellites[0].ID(),
				planet.StorageNodes[0].ID(),
				tt.pieceID,
				tt.action,
				serialNumber,
				24*time.Hour,
				24*time.Hour,
				int64(len(data)),
			)
			// The storage node only accepts order limits signed by the satellite.
			signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
			orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
			require.NoError(t, err)

			uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
			require.NoError(t, err)

			_, err = uploader.Write(data)
			require.NoError(t, err)

			// Order-limit validation errors surface on Commit, not on
			// Upload/Write, so the error check happens here.
			pieceHash, err := uploader.Commit(ctx)
			if tt.err != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.err)
			} else {
				require.NoError(t, err)

				expectedHash := pkcrypto.SHA256Hash(data)
				assert.Equal(t, expectedHash, pieceHash.Hash)

				// The returned piece hash must be signed by the storage node.
				signee := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
				require.NoError(t, signing.VerifyPieceHashSignature(ctx, signee, pieceHash))
			}
		}
	})
}
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2020-05-22 19:12:26 +01:00
|
|
|
// TestUploadOverAvailable uploads a piece larger than the node's available
// disk space (capped at 3000000 bytes via a limited-space test DB) and
// expects the write to fail with a "not enough available disk space" error.
func TestUploadOverAvailable(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			StorageNodeDB: func(index int, db storagenode.DB, log *zap.Logger) (storagenode.DB, error) {
				// Wrap the DB so the node reports only 3000000 bytes free.
				return testblobs.NewLimitedSpaceDB(log.Named("overload"), db, 3000000), nil
			},
			StorageNode: func(index int, config *storagenode.Config) {
				config.Storage2.Monitor.MinimumDiskSpace = 3 * memory.MB
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
		require.NoError(t, err)
		defer ctx.Check(client.Close)

		// Single test case kept in the same struct shape as TestUpload's table.
		var tt struct {
			pieceID       storj.PieceID
			contentLength memory.Size
			action        pb.PieceAction
			err           string
		}

		tt.pieceID = storj.PieceID{1}
		tt.contentLength = 5 * memory.MB // exceeds the 3000000-byte cap above
		tt.action = pb.PieceAction_PUT
		tt.err = "not enough available disk space, have: 3000000, need: 5000000"

		data := testrand.Bytes(tt.contentLength)
		serialNumber := testrand.SerialNumber()

		orderLimit, piecePrivateKey := GenerateOrderLimit(
			t,
			planet.Satellites[0].ID(),
			planet.StorageNodes[0].ID(),
			tt.pieceID,
			tt.action,
			serialNumber,
			24*time.Hour,
			24*time.Hour,
			int64(len(data)),
		)
		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
		orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
		require.NoError(t, err)

		uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
		require.NoError(t, err)

		// Write fails early because the node rejects the oversized piece.
		_, err = uploader.Write(data)
		require.Error(t, err)

		// The detailed disk-space error is reported on Commit.
		pieceHash, err := uploader.Commit(ctx)
		if tt.err != "" {
			require.Error(t, err)
			require.Contains(t, err.Error(), tt.err)
		} else {
			require.NoError(t, err)

			expectedHash := pkcrypto.SHA256Hash(data)
			assert.Equal(t, expectedHash, pieceHash.Hash)

			signee := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
			require.NoError(t, signing.VerifyPieceHashSignature(ctx, signee, pieceHash))
		}
	})
}
|
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
func TestDownload(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
|
|
|
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
pieceID := storj.PieceID{1}
|
|
|
|
expectedData, _, _ := uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
|
|
|
|
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
|
2019-03-20 21:12:00 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
for _, tt := range []struct {
|
|
|
|
pieceID storj.PieceID
|
|
|
|
action pb.PieceAction
|
|
|
|
errs []string
|
|
|
|
}{
|
|
|
|
{ // should successfully download data
|
|
|
|
pieceID: pieceID,
|
|
|
|
action: pb.PieceAction_GET,
|
|
|
|
},
|
|
|
|
{ // should err with piece ID not specified
|
|
|
|
pieceID: storj.PieceID{},
|
|
|
|
action: pb.PieceAction_GET,
|
|
|
|
errs: []string{"missing piece id"},
|
|
|
|
},
|
|
|
|
{ // should err with piece ID not specified
|
|
|
|
pieceID: storj.PieceID{2},
|
|
|
|
action: pb.PieceAction_GET,
|
|
|
|
errs: []string{"file does not exist", "The system cannot find the path specified"},
|
|
|
|
},
|
|
|
|
{ // should err with invalid action
|
|
|
|
pieceID: pieceID,
|
|
|
|
action: pb.PieceAction_PUT,
|
|
|
|
errs: []string{"expected get or get repair or audit action got PUT"},
|
|
|
|
},
|
|
|
|
} {
|
|
|
|
serialNumber := testrand.SerialNumber()
|
|
|
|
|
|
|
|
orderLimit, piecePrivateKey := GenerateOrderLimit(
|
|
|
|
t,
|
|
|
|
planet.Satellites[0].ID(),
|
|
|
|
planet.StorageNodes[0].ID(),
|
|
|
|
tt.pieceID,
|
|
|
|
tt.action,
|
|
|
|
serialNumber,
|
|
|
|
24*time.Hour,
|
|
|
|
24*time.Hour,
|
|
|
|
int64(len(expectedData)),
|
|
|
|
)
|
|
|
|
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
|
|
|
|
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
|
2019-03-20 21:12:00 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, int64(len(expectedData)))
|
|
|
|
require.NoError(t, err)
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
buffer := make([]byte, len(expectedData))
|
|
|
|
n, err := downloader.Read(buffer)
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
if len(tt.errs) > 0 {
|
|
|
|
} else {
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, expectedData, buffer[:n])
|
|
|
|
}
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
err = downloader.Close()
|
|
|
|
if len(tt.errs) > 0 {
|
|
|
|
require.Error(t, err)
|
|
|
|
require.True(t, strings.Contains(err.Error(), tt.errs[0]) || strings.Contains(err.Error(), tt.errs[1]), err.Error())
|
|
|
|
} else {
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
// these should only be not-nil if action = pb.PieceAction_GET_REPAIR
|
|
|
|
hash, originalLimit := downloader.GetHashAndLimit()
|
|
|
|
require.Nil(t, hash)
|
|
|
|
require.Nil(t, originalLimit)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2019-12-06 18:03:22 +00:00
|
|
|
// TestDownloadGetRepair downloads a piece with the GET_REPAIR action and
// verifies that GetHashAndLimit returns the original upload's piece hash and
// order limit (which are only populated for repair downloads).
func TestDownloadGetRepair(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

		pieceID := storj.PieceID{1}
		expectedData, ulOrderLimit, originHash := uploadPiece(
			t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0],
		)
		client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
		require.NoError(t, err)

		serialNumber := testrand.SerialNumber()

		// Fresh order limit for the repair download of the piece above.
		dlOrderLimit, piecePrivateKey := GenerateOrderLimit(
			t,
			planet.Satellites[0].ID(),
			planet.StorageNodes[0].ID(),
			storj.PieceID{1},
			pb.PieceAction_GET_REPAIR,
			serialNumber,
			24*time.Hour,
			24*time.Hour,
			int64(len(expectedData)),
		)
		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
		dlOrderLimit, err = signing.SignOrderLimit(ctx, signer, dlOrderLimit)
		require.NoError(t, err)

		downloader, err := client.Download(ctx, dlOrderLimit, piecePrivateKey, 0, int64(len(expectedData)))
		require.NoError(t, err)

		buffer := make([]byte, len(expectedData))
		n, err := downloader.Read(buffer)

		require.NoError(t, err)
		require.Equal(t, expectedData, buffer[:n])

		err = downloader.Close()
		require.NoError(t, err)

		// For GET_REPAIR the node returns the hash and order limit recorded at
		// upload time; they must match what uploadPiece produced.
		hash, originLimit := downloader.GetHashAndLimit()
		require.NotNil(t, hash)
		require.Equal(t, originHash.Hash, hash.Hash)
		require.Equal(t, originHash.PieceId, hash.PieceId)

		require.NotNil(t, originLimit)
		require.Equal(t, originLimit.Action, ulOrderLimit.Action)
		require.Equal(t, originLimit.Limit, ulOrderLimit.Limit)
		require.Equal(t, originLimit.PieceId, ulOrderLimit.PieceId)
		require.Equal(t, originLimit.SatelliteId, ulOrderLimit.SatelliteId)
		require.Equal(t, originLimit.SerialNumber, ulOrderLimit.SerialNumber)
		require.Equal(t, originLimit.SatelliteSignature, ulOrderLimit.SatelliteSignature)
		require.Equal(t, originLimit.UplinkPublicKey, ulOrderLimit.UplinkPublicKey)
	})
}
|
|
|
|
|
2019-03-20 21:12:00 +00:00
|
|
|
// TestDelete exercises the piecestore Delete RPC over a raw DRPC connection:
// deleting an existing piece, a nonexistent piece, a missing piece ID, and an
// order limit whose action is not DELETE.
func TestDelete(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		pieceID := storj.PieceID{1}
		uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])

		// Dial the node directly so the raw piecestore client can be used.
		nodeurl := planet.StorageNodes[0].NodeURL()
		conn, err := planet.Uplinks[0].Dialer.DialNodeURL(ctx, nodeurl)
		require.NoError(t, err)
		defer ctx.Check(conn.Close)

		client := pb.NewDRPCPiecestoreClient(conn)

		for _, tt := range []struct {
			pieceID storj.PieceID
			action  pb.PieceAction
			err     string
		}{
			{ // should successfully delete data
				pieceID: pieceID,
				action:  pb.PieceAction_DELETE,
				err:     "",
			},
			{ // should err with piece ID not found
				pieceID: storj.PieceID{99},
				action:  pb.PieceAction_DELETE,
				err:     "", // TODO should this return error
			},
			{ // should err with piece ID not specified
				pieceID: storj.PieceID{},
				action:  pb.PieceAction_DELETE,
				err:     "missing piece id",
			},
			{ // should err due to incorrect action
				pieceID: pieceID,
				action:  pb.PieceAction_GET,
				err:     "expected delete action got GET",
			},
		} {
			serialNumber := testrand.SerialNumber()

			// The piece private key is not needed for Delete, only the limit.
			orderLimit, _ := GenerateOrderLimit(
				t,
				planet.Satellites[0].ID(),
				planet.StorageNodes[0].ID(),
				tt.pieceID,
				tt.action,
				serialNumber,
				24*time.Hour,
				24*time.Hour,
				100,
			)
			signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
			orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
			require.NoError(t, err)

			_, err := client.Delete(ctx, &pb.PieceDeleteRequest{
				Limit: orderLimit,
			})
			if tt.err != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.err)
			} else {
				require.NoError(t, err)
			}
		}
	})
}
|
|
|
|
|
2019-12-18 15:33:12 +00:00
|
|
|
// TestDeletePieces exercises the batch DeletePieces RPC, which is restricted
// to trusted satellites: deleting existing pieces, tolerating a missing piece
// in the batch, an empty request, and rejection of untrusted (uplink) callers.
func TestDeletePieces(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		storagenode := planet.StorageNodes[0]

		// Dial as the satellite: DeletePieces requires a trusted peer.
		nodeurl := storagenode.NodeURL()
		conn, err := planet.Satellites[0].Dialer.DialNodeURL(ctx, nodeurl)
		require.NoError(t, err)
		defer ctx.Check(conn.Close)

		client := pb.NewDRPCPiecestoreClient(conn)

		t.Run("ok", func(t *testing.T) {
			pieceIDs := []storj.PieceID{testrand.PieceID(), testrand.PieceID(), testrand.PieceID(), testrand.PieceID()}
			dataArray := make([][]byte, len(pieceIDs))
			for i, pieceID := range pieceIDs {
				dataArray[i], _, _ = uploadPiece(t, ctx, pieceID, storagenode, planet.Uplinks[0], satellite)
			}

			_, err := client.DeletePieces(ctx.Context, &pb.DeletePiecesRequest{
				PieceIds: pieceIDs,
			})
			require.NoError(t, err)

			// Deletion is asynchronous; wait until the node's deleters drain.
			planet.WaitForStorageNodeDeleters(ctx)

			for i, pieceID := range pieceIDs {
				_, err = downloadPiece(t, ctx, pieceID, int64(len(dataArray[i])), storagenode, planet.Uplinks[0], satellite)
				require.Error(t, err)
			}
			// NOTE(review): only the last loop iteration's err is inspected here.
			require.Condition(t, func() bool {
				return strings.Contains(err.Error(), "file does not exist") ||
					strings.Contains(err.Error(), "The system cannot find the path specified")
			}, "unexpected error message")
		})

		t.Run("ok: one piece to delete is missing", func(t *testing.T) {
			missingPieceID := testrand.PieceID()
			pieceIDs := []storj.PieceID{testrand.PieceID(), testrand.PieceID(), testrand.PieceID(), testrand.PieceID()}
			dataArray := make([][]byte, len(pieceIDs))
			for i, pieceID := range pieceIDs {
				dataArray[i], _, _ = uploadPiece(t, ctx, pieceID, storagenode, planet.Uplinks[0], satellite)
			}

			// A never-uploaded piece in the batch must not fail the request.
			_, err := client.DeletePieces(ctx.Context, &pb.DeletePiecesRequest{
				PieceIds: append(pieceIDs, missingPieceID),
			})
			require.NoError(t, err)

			planet.WaitForStorageNodeDeleters(ctx)

			for i, pieceID := range pieceIDs {
				_, err = downloadPiece(t, ctx, pieceID, int64(len(dataArray[i])), storagenode, planet.Uplinks[0], satellite)
				require.Error(t, err)
			}
			require.Condition(t, func() bool {
				return strings.Contains(err.Error(), "file does not exist") ||
					strings.Contains(err.Error(), "The system cannot find the path specified")
			}, "unexpected error message")
		})

		t.Run("ok: no piece deleted", func(t *testing.T) {
			pieceID := testrand.PieceID()
			data, _, _ := uploadPiece(t, ctx, pieceID, storagenode, planet.Uplinks[0], satellite)

			// Empty request: nothing should be deleted.
			_, err := client.DeletePieces(ctx.Context, &pb.DeletePiecesRequest{})
			require.NoError(t, err)

			planet.WaitForStorageNodeDeleters(ctx)

			downloaded, err := downloadPiece(t, ctx, pieceID, int64(len(data)), storagenode, planet.Uplinks[0], satellite)
			require.NoError(t, err)
			require.Equal(t, data, downloaded)
		})

		t.Run("error: permission denied", func(t *testing.T) {
			// Dial as the uplink, which is not a trusted satellite.
			conn, err := planet.Uplinks[0].Dialer.DialNodeURL(ctx, nodeurl)
			require.NoError(t, err)
			defer ctx.Check(conn.Close)
			client := pb.NewDRPCPiecestoreClient(conn)

			pieceID := testrand.PieceID()
			data, _, _ := uploadPiece(t, ctx, pieceID, storagenode, planet.Uplinks[0], satellite)

			_, err = client.DeletePieces(ctx.Context, &pb.DeletePiecesRequest{
				PieceIds: []storj.PieceID{pieceID},
			})
			require.Error(t, err)
			require.Equal(t, rpcstatus.PermissionDenied, rpcstatus.Code(err))

			planet.WaitForStorageNodeDeleters(ctx)

			// The piece must survive the rejected request.
			downloaded, err := downloadPiece(t, ctx, pieceID, int64(len(data)), storagenode, planet.Uplinks[0], satellite)
			require.NoError(t, err)
			require.Equal(t, data, downloaded)
		})
	})
}
|
2019-11-26 17:47:19 +00:00
|
|
|
|
2019-07-03 14:47:55 +01:00
|
|
|
func TestTooManyRequests(t *testing.T) {
|
|
|
|
t.Skip("flaky, because of EOF issues")
|
|
|
|
|
|
|
|
const uplinkCount = 6
|
|
|
|
const maxConcurrent = 3
|
|
|
|
const expectedFailures = uplinkCount - maxConcurrent
|
|
|
|
|
2020-01-21 19:06:14 +00:00
|
|
|
testplanet.Run(t, testplanet.Config{
|
2019-07-03 14:47:55 +01:00
|
|
|
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: uplinkCount,
|
|
|
|
Reconfigure: testplanet.Reconfigure{
|
|
|
|
StorageNode: func(index int, config *storagenode.Config) {
|
|
|
|
config.Storage2.MaxConcurrentRequests = maxConcurrent
|
|
|
|
},
|
|
|
|
},
|
2020-01-21 19:06:14 +00:00
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
doneWaiting := make(chan struct{})
|
|
|
|
failedCount := int64(expectedFailures)
|
|
|
|
|
|
|
|
uploads, _ := errgroup.WithContext(ctx)
|
|
|
|
defer ctx.Check(uploads.Wait)
|
|
|
|
|
|
|
|
for i, uplink := range planet.Uplinks {
|
|
|
|
i, uplink := i, uplink
|
|
|
|
uploads.Go(func() (err error) {
|
2020-05-19 16:49:13 +01:00
|
|
|
storageNode := planet.StorageNodes[0]
|
2020-01-21 19:06:14 +00:00
|
|
|
config := piecestore.DefaultConfig
|
|
|
|
config.UploadBufferSize = 0 // disable buffering so we can detect write error early
|
|
|
|
|
2020-05-19 16:49:13 +01:00
|
|
|
client, err := piecestore.DialNodeURL(ctx, uplink.Dialer, storageNode.NodeURL(), uplink.Log, config)
|
2020-01-21 19:06:14 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if cerr := client.Close(); cerr != nil {
|
|
|
|
uplink.Log.Error("close failed", zap.Error(cerr))
|
|
|
|
err = errs.Combine(err, cerr)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
pieceID := storj.PieceID{byte(i + 1)}
|
|
|
|
serialNumber := testrand.SerialNumber()
|
|
|
|
|
|
|
|
orderLimit, piecePrivateKey := GenerateOrderLimit(
|
|
|
|
t,
|
|
|
|
planet.Satellites[0].ID(),
|
|
|
|
planet.StorageNodes[0].ID(),
|
|
|
|
pieceID,
|
|
|
|
pb.PieceAction_PUT,
|
|
|
|
serialNumber,
|
|
|
|
24*time.Hour,
|
|
|
|
24*time.Hour,
|
|
|
|
int64(10000),
|
|
|
|
)
|
|
|
|
|
|
|
|
satelliteSigner := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
|
|
|
|
orderLimit, err = signing.SignOrderLimit(ctx, satelliteSigner, orderLimit)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
|
|
|
|
2020-01-21 19:06:14 +00:00
|
|
|
upload, err := client.Upload(ctx, orderLimit, piecePrivateKey)
|
|
|
|
if err != nil {
|
|
|
|
if errs2.IsRPC(err, rpcstatus.Unavailable) {
|
|
|
|
if atomic.AddInt64(&failedCount, -1) == 0 {
|
|
|
|
close(doneWaiting)
|
|
|
|
}
|
|
|
|
return nil
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
2020-01-21 19:06:14 +00:00
|
|
|
uplink.Log.Error("upload failed", zap.Stringer("Piece ID", pieceID), zap.Error(err))
|
|
|
|
return err
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
|
|
|
|
2020-01-21 19:06:14 +00:00
|
|
|
_, err = upload.Write(make([]byte, orderLimit.Limit))
|
|
|
|
if err != nil {
|
|
|
|
if errs2.IsRPC(err, rpcstatus.Unavailable) {
|
|
|
|
if atomic.AddInt64(&failedCount, -1) == 0 {
|
|
|
|
close(doneWaiting)
|
|
|
|
}
|
|
|
|
return nil
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
2020-01-21 19:06:14 +00:00
|
|
|
uplink.Log.Error("write failed", zap.Stringer("Piece ID", pieceID), zap.Error(err))
|
|
|
|
return err
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
|
|
|
|
2020-01-21 19:06:14 +00:00
|
|
|
_, err = upload.Commit(ctx)
|
|
|
|
if err != nil {
|
|
|
|
if errs2.IsRPC(err, rpcstatus.Unavailable) {
|
|
|
|
if atomic.AddInt64(&failedCount, -1) == 0 {
|
|
|
|
close(doneWaiting)
|
|
|
|
}
|
|
|
|
return nil
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
2020-01-21 19:06:14 +00:00
|
|
|
uplink.Log.Error("commit failed", zap.Stringer("Piece ID", pieceID), zap.Error(err))
|
|
|
|
return err
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
|
|
|
|
2020-01-21 19:06:14 +00:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
})
|
2019-07-03 14:47:55 +01:00
|
|
|
}
|
|
|
|
|
2019-07-11 21:51:40 +01:00
|
|
|
func GenerateOrderLimit(t *testing.T, satellite storj.NodeID, storageNode storj.NodeID, pieceID storj.PieceID, action pb.PieceAction, serialNumber storj.SerialNumber, pieceExpiration, orderExpiration time.Duration, limit int64) (*pb.OrderLimit, storj.PiecePrivateKey) {
|
|
|
|
piecePublicKey, piecePrivateKey, err := storj.NewPieceKey()
|
|
|
|
require.NoError(t, err)
|
2019-03-20 21:12:00 +00:00
|
|
|
|
2019-07-09 22:54:00 +01:00
|
|
|
now := time.Now()
|
2019-07-03 14:47:55 +01:00
|
|
|
return &pb.OrderLimit{
|
2019-03-20 21:12:00 +00:00
|
|
|
SatelliteId: satellite,
|
2019-07-11 21:51:40 +01:00
|
|
|
UplinkPublicKey: piecePublicKey,
|
2019-03-20 21:12:00 +00:00
|
|
|
StorageNodeId: storageNode,
|
|
|
|
PieceId: pieceID,
|
|
|
|
Action: action,
|
|
|
|
SerialNumber: serialNumber,
|
2019-07-02 17:06:12 +01:00
|
|
|
OrderCreation: time.Now(),
|
2019-07-09 22:54:00 +01:00
|
|
|
OrderExpiration: now.Add(orderExpiration),
|
|
|
|
PieceExpiration: now.Add(pieceExpiration),
|
2019-03-20 21:12:00 +00:00
|
|
|
Limit: limit,
|
2019-07-11 21:51:40 +01:00
|
|
|
}, piecePrivateKey
|
2019-03-20 21:12:00 +00:00
|
|
|
}
|
2019-11-26 17:47:19 +00:00
|
|
|
|
|
|
|
// uploadPiece uploads 10 KiB of random data as the given piece ID to
// storageNode and returns the uploaded bytes, the satellite-signed order
// limit used for the upload, and the piece hash signed by the storage node.
// It fails the test on any error.
func uploadPiece(
	t *testing.T, ctx *testcontext.Context, piece storj.PieceID, storageNode *testplanet.StorageNode,
	uplink *testplanet.Uplink, satellite *testplanet.Satellite,
) (uploadedData []byte, _ *pb.OrderLimit, _ *pb.PieceHash) {
	t.Helper()

	client, err := uplink.DialPiecestore(ctx, storageNode)
	require.NoError(t, err)
	defer ctx.Check(client.Close)

	serialNumber := testrand.SerialNumber()
	uploadedData = testrand.Bytes(10 * memory.KiB)

	orderLimit, piecePrivateKey := GenerateOrderLimit(
		t,
		satellite.ID(),
		storageNode.ID(),
		piece,
		pb.PieceAction_PUT,
		serialNumber,
		24*time.Hour,
		24*time.Hour,
		int64(len(uploadedData)),
	)
	// Sign with the satellite's identity so the node accepts the limit.
	signer := signing.SignerFromFullIdentity(satellite.Identity)
	orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
	require.NoError(t, err)

	uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
	require.NoError(t, err)

	_, err = uploader.Write(uploadedData)
	require.NoError(t, err)

	hash, err := uploader.Commit(ctx)
	require.NoError(t, err)

	return uploadedData, orderLimit, hash
}
|
|
|
|
|
|
|
|
// downloadPiece downloads up to limit bytes of the given piece from
// storageNode with a GET order limit signed by satellite. Unlike uploadPiece
// it returns the download error to the caller (tests assert on it), while
// setup failures still fail the test directly.
func downloadPiece(
	t *testing.T, ctx *testcontext.Context, piece storj.PieceID, limit int64,
	storageNode *testplanet.StorageNode, uplink *testplanet.Uplink, satellite *testplanet.Satellite,
) (pieceData []byte, err error) {
	t.Helper()

	serialNumber := testrand.SerialNumber()
	orderLimit, piecePrivateKey := GenerateOrderLimit(
		t,
		satellite.ID(),
		storageNode.ID(),
		piece,
		pb.PieceAction_GET,
		serialNumber,
		24*time.Hour,
		24*time.Hour,
		limit,
	)
	signer := signing.SignerFromFullIdentity(satellite.Identity)
	orderLimit, err = signing.SignOrderLimit(ctx.Context, signer, orderLimit)
	require.NoError(t, err)

	client, err := uplink.DialPiecestore(ctx, storageNode)
	require.NoError(t, err)

	downloader, err := client.Download(ctx.Context, orderLimit, piecePrivateKey, 0, limit)
	require.NoError(t, err)
	defer func() {
		if err != nil {
			// Ignore err in Close if an error happened in Download because it's also
			// returned by Close.
			_ = downloader.Close()
			return
		}

		// Otherwise surface any Close error through the named return value.
		err = downloader.Close()
	}()

	buffer := make([]byte, limit)
	n, err := downloader.Read(buffer)
	return buffer[:n], err
}
|