// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information

package piecestore_test

import (
	"context"
	"io/ioutil"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"

	"storj.io/common/identity"
	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
)

const oneWeek = 7 * 24 * time.Hour
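
// TestOrderLimitPutValidation checks that the piecestore endpoint accepts an
// upload only when its order limit is valid: signed by a trusted satellite,
// using a put action, unexpired, non-negative, and within the node's
// available bandwidth and space.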
func TestOrderLimitPutValidation(t *testing.T) {
	for _, tt := range []struct {
		testName            string
		useUnknownSatellite bool
		pieceID             storj.PieceID
		action              pb.PieceAction
		serialNumber        storj.SerialNumber
		pieceExpiration     time.Duration
		orderExpiration     time.Duration
		limit               int64
		availableBandwidth  int64
		availableSpace      int64
		err                 string
	}{
		{
			testName:            "unapproved satellite id",
			useUnknownSatellite: true,
			pieceID:             storj.PieceID{1},
			action:              pb.PieceAction_PUT,
			serialNumber:        storj.SerialNumber{1},
			pieceExpiration:     oneWeek,
			orderExpiration:     oneWeek,
			limit:               memory.KiB.Int64(),
			err:                 " is untrusted",
		},
		{
			testName:        "approved satellite id",
			pieceID:         storj.PieceID{2},
			action:          pb.PieceAction_PUT,
			serialNumber:    storj.SerialNumber{2},
			pieceExpiration: oneWeek,
			orderExpiration: oneWeek,
			limit:           10 * memory.KiB.Int64(),
		},
		{
			testName:        "wrong action type",
			pieceID:         storj.PieceID{3},
			action:          pb.PieceAction_GET,
			serialNumber:    storj.SerialNumber{3},
			pieceExpiration: oneWeek,
			orderExpiration: oneWeek,
			limit:           memory.KiB.Int64(),
			err:             "expected put or put repair action got GET",
		},
		{
			testName:        "piece expired",
			pieceID:         storj.PieceID{4},
			action:          pb.PieceAction_PUT,
			serialNumber:    storj.SerialNumber{4},
			pieceExpiration: -4 * 24 * time.Hour,
			orderExpiration: oneWeek,
			limit:           memory.KiB.Int64(),
			err:             "piece expired:",
		},
		{
			testName:        "limit is negative",
			pieceID:         storj.PieceID{5},
			action:          pb.PieceAction_PUT,
			serialNumber:    storj.SerialNumber{5},
			pieceExpiration: oneWeek,
			orderExpiration: oneWeek,
			limit:           -1,
			err:             "order limit is negative",
		},
		{
			testName:        "order limit expired",
			pieceID:         storj.PieceID{6},
			action:          pb.PieceAction_PUT,
			serialNumber:    storj.SerialNumber{6},
			pieceExpiration: oneWeek,
			orderExpiration: -4 * 24 * time.Hour,
			limit:           memory.KiB.Int64(),
			err:             "order expired:",
		},
		{
			testName:           "allocated bandwidth limit",
			pieceID:            storj.PieceID{7},
			action:             pb.PieceAction_PUT,
			serialNumber:       storj.SerialNumber{7},
			pieceExpiration:    oneWeek,
			orderExpiration:    oneWeek,
			limit:              10 * memory.KiB.Int64(),
			availableBandwidth: 5 * memory.KiB.Int64(),
			err:                "out of bandwidth",
		},
		{
			testName:        "allocated space limit",
			pieceID:         storj.PieceID{8},
			action:          pb.PieceAction_PUT,
			serialNumber:    storj.SerialNumber{8},
			pieceExpiration: oneWeek,
			orderExpiration: oneWeek,
			limit:           10 * memory.KiB.Int64(),
			availableSpace:  5 * memory.KiB.Int64(),
			err:             "out of space",
		},
	} {
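		// capture the range variable for the closure below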
		tt := tt
		t.Run(tt.testName, func(t *testing.T) {
			testplanet.Run(t, testplanet.Config{
				SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
			}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
				// set the desired amount of available bandwidth
				setBandwidth(ctx, t, planet, tt.availableBandwidth)
				// set the desired amount of available space
				setSpace(ctx, t, planet, tt.availableSpace)

				client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
				require.NoError(t, err)
				defer ctx.Check(client.Close)

				signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
				satellite := planet.Satellites[0].Identity
				if tt.useUnknownSatellite {
					unapprovedSatellite, err := planet.NewIdentity()
					require.NoError(t, err)
					signer = signing.SignerFromFullIdentity(unapprovedSatellite)
					satellite = unapprovedSatellite
				}

				orderLimit, piecePrivateKey := GenerateOrderLimit(
					t,
					satellite.ID,
					planet.StorageNodes[0].ID(),
					tt.pieceID,
					tt.action,
					tt.serialNumber,
					tt.pieceExpiration,
					tt.orderExpiration,
					tt.limit,
				)

				orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
				require.NoError(t, err)

				uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
				require.NoError(t, err)
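
				// upload 10 KiB in 1 KiB chunks; an invalid limit may fail the
				// Write itself, otherwise the error surfaces on Commit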
				var writeErr error
				buffer := make([]byte, memory.KiB)
				for i := 0; i < 10; i++ {
					testrand.Read(buffer)
					_, writeErr = uploader.Write(buffer)
					if writeErr != nil {
						break
					}
				}
				_, commitErr := uploader.Commit(ctx)
				err = errs.Combine(writeErr, commitErr)
				if tt.err != "" {
					require.Error(t, err)
					require.Contains(t, err.Error(), tt.err)
				} else {
					require.NoError(t, err)
				}
			})
		})
	}
}
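
// TestOrderLimitGetValidation checks that the piecestore endpoint rejects a
// download whose order limit exceeds the node's remaining bandwidth allowance.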
func TestOrderLimitGetValidation(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		defaultPieceSize := 10 * memory.KiB
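
		// consume almost 1 TB of GET bandwidth up front so that, together with
		// the 10 KiB upload below, too little bandwidth remains for a 10 KiB
		// download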
		for _, storageNode := range planet.StorageNodes {
			err := storageNode.DB.Bandwidth().Add(ctx, planet.Satellites[0].ID(), pb.PieceAction_GET, memory.TB.Int64()-(15*memory.KiB.Int64()), time.Now())
			require.NoError(t, err)
		}

		{ // upload test piece
			client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
			require.NoError(t, err)
			defer ctx.Check(client.Close)

			signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
			satellite := planet.Satellites[0].Identity

			orderLimit, piecePrivateKey := GenerateOrderLimit(
				t,
				satellite.ID,
				planet.StorageNodes[0].ID(),
				storj.PieceID{1},
				pb.PieceAction_PUT,
				storj.SerialNumber{0},
				oneWeek,
				oneWeek,
				defaultPieceSize.Int64(),
			)

			orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
			require.NoError(t, err)

			uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
			require.NoError(t, err)

			data := testrand.Bytes(defaultPieceSize)

			_, err = uploader.Write(data)
			require.NoError(t, err)
			_, err = uploader.Commit(ctx)
			require.NoError(t, err)
		}

		// wait for all requests to finish to ensure that the upload usage has
		// been accounted for.
		waitForEndpointRequestsToDrain(t, planet)

		for _, tt := range []struct {
			satellite       *identity.FullIdentity
			pieceID         storj.PieceID
			action          pb.PieceAction
			serialNumber    storj.SerialNumber
			pieceExpiration time.Duration
			orderExpiration time.Duration
			limit           int64
			err             string
		}{
			{ // allocated bandwidth limit
				pieceID:         storj.PieceID{1},
				action:          pb.PieceAction_GET,
				serialNumber:    storj.SerialNumber{1},
				pieceExpiration: oneWeek,
				orderExpiration: oneWeek,
				limit:           10 * memory.KiB.Int64(),
				err:             "out of bandwidth",
			},
		} {
			client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
			require.NoError(t, err)
			defer ctx.Check(client.Close)

			signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
			satellite := planet.Satellites[0].Identity
			if tt.satellite != nil {
				signer = signing.SignerFromFullIdentity(tt.satellite)
				satellite = tt.satellite
			}

			orderLimit, piecePrivateKey := GenerateOrderLimit(
				t,
				satellite.ID,
				planet.StorageNodes[0].ID(),
				tt.pieceID,
				tt.action,
				tt.serialNumber,
				tt.pieceExpiration,
				tt.orderExpiration,
				tt.limit,
			)

			orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
			require.NoError(t, err)

			downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, tt.limit)
			require.NoError(t, err)

			buffer, readErr := ioutil.ReadAll(downloader)
			closeErr := downloader.Close()
			err = errs.Combine(readErr, closeErr)
			if tt.err != "" {
				assert.Equal(t, 0, len(buffer))
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.err)
			} else {
				require.NoError(t, err)
			}
		}
	})
}
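
// setBandwidth records artificial GET usage on every storage node so that
// exactly `bandwidth` bytes of bandwidth remain available. A zero value
// leaves the nodes untouched.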
func setBandwidth(ctx context.Context, t *testing.T, planet *testplanet.Planet, bandwidth int64) {
	if bandwidth == 0 {
		return
	}
	for _, storageNode := range planet.StorageNodes {
		availableBandwidth, err := storageNode.Storage2.Monitor.AvailableBandwidth(ctx)
		require.NoError(t, err)
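		// consume the surplus so that exactly `bandwidth` bytes remain available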
		diff := availableBandwidth - bandwidth
		err = storageNode.DB.Bandwidth().Add(ctx, planet.Satellites[0].ID(), pb.PieceAction_GET, diff, time.Now())
		require.NoError(t, err)
	}
}
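
// setSpace adjusts the space-used cache on every storage node so that exactly
// `space` bytes of disk space remain available. A zero value leaves the nodes
// untouched.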
func setSpace(ctx context.Context, t *testing.T, planet *testplanet.Planet, space int64) {
	if space == 0 {
		return
	}
	for _, storageNode := range planet.StorageNodes {
		availableSpace, err := storageNode.Storage2.Monitor.AvailableSpace(ctx)
		require.NoError(t, err)
		// add these bytes to the space-used cache so that we can test what happens
		// when we exceed the available space on the storage node
		err = storageNode.DB.PieceSpaceUsedDB().UpdatePieceTotal(ctx, availableSpace-space)
		require.NoError(t, err)
		err = storageNode.Storage2.CacheService.Init(ctx)
		require.NoError(t, err)
	}
}
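
// waitForEndpointRequestsToDrain polls the storage nodes until no piecestore
// requests are in flight, failing the test if they have not drained within a
// minute.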
func waitForEndpointRequestsToDrain(t *testing.T, planet *testplanet.Planet) {
	timeout := time.NewTimer(time.Minute)
	defer timeout.Stop()
	for {
		if endpointRequestCount(planet) == 0 {
			return
		}
		select {
		case <-time.After(50 * time.Millisecond):
		case <-timeout.C:
			require.FailNow(t, "timed out waiting for endpoint requests to drain")
		}
	}
}
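
// endpointRequestCount returns the number of piecestore requests currently
// being handled across all storage nodes in the planet.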
func endpointRequestCount(planet *testplanet.Planet) int {
	total := 0
	for _, storageNode := range planet.StorageNodes {
		total += int(storageNode.Storage2.Endpoint.TestLiveRequestCount())
	}
	return total
}