replace planet.Start in tests with planet.Run

planet.Start only starts a testplanet system, whereas planet.Run starts a
testplanet and runs the test against it once per supported DB backend
(CockroachDB compatibility).
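
A minimal before/after sketch of the pattern this commit migrates, in the
spirit of the diffs below (the test name, package name, and node counts here
are illustrative; each real test configures its own testplanet.Config):

    package example_test

    import (
    	"testing"

    	"github.com/stretchr/testify/require"

    	"storj.io/storj/private/testcontext"
    	"storj.io/storj/private/testplanet"
    )

    // Before: construct, start, and shut down the planet by hand.
    func TestSomethingOld(t *testing.T) {
    	ctx := testcontext.New(t)
    	defer ctx.Cleanup()

    	// counts: satellites, storage nodes, uplinks
    	planet, err := testplanet.New(t, 1, 5, 1)
    	require.NoError(t, err)
    	defer ctx.Check(planet.Shutdown)

    	planet.Start(ctx)

    	// ... test body using ctx and planet ...
    }

    // After: testplanet.Run owns setup, startup, and shutdown, and runs the
    // test body once for each configured database backend.
    func TestSomethingNew(t *testing.T) {
    	testplanet.Run(t, testplanet.Config{
    		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
    	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
    		// ... test body using ctx and planet ...
    	})
    }

Because Run repeats the body per backend, the diffs below mostly move existing
test bodies inside the Run callback unchanged and drop the manual
New/Start/Shutdown plumbing.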

Change-Id: I39c9da26d9619ee69a2b718d24ab00271f9e9bc2
Cameron Ayer 2019-12-06 13:03:22 -05:00 committed by Cameron
parent 8cf1aa6e4f
commit 6fae361c31
11 changed files with 935 additions and 1062 deletions


@@ -11,33 +11,11 @@ import (
"time"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest"
"storj.io/storj/private/testcontext"
"storj.io/storj/private/testplanet"
)
func RunPlanet(t *testing.T, run func(ctx *testcontext.Context, planet *testplanet.Planet)) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.NewCustom(
zaptest.NewLogger(t, zaptest.Level(zapcore.WarnLevel)),
testplanet.Config{
SatelliteCount: 1,
StorageNodeCount: 5,
UplinkCount: 1,
Reconfigure: testplanet.DisablePeerCAWhitelist,
},
)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
run(ctx, planet)
}
func TestC(t *testing.T) {
ctx := testcontext.NewWithTimeout(t, 5*time.Minute)
defer ctx.Cleanup()
@@ -71,7 +49,10 @@ func TestC(t *testing.T) {
},
})
RunPlanet(t, func(ctx *testcontext.Context, planet *testplanet.Planet) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
Reconfigure: testplanet.DisablePeerCAWhitelist,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
cmd := exec.Command(testexe)
cmd.Dir = filepath.Dir(testexe)
cmd.Env = append(os.Environ(),


@@ -122,29 +122,24 @@ func TestNewOptions(t *testing.T) {
}
func TestOptions_ServerOption_Peer_CA_Whitelist(t *testing.T) {
ctx := testcontext.New(t)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 0, StorageNodeCount: 2, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
target := planet.StorageNodes[1].Local()
testidentity.CompleteIdentityVersionsTest(t, func(t *testing.T, version storj.IDVersion, ident *identity.FullIdentity) {
tlsOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{
PeerIDVersions: "*",
}, nil)
require.NoError(t, err)
planet, err := testplanet.New(t, 0, 2, 0)
require.NoError(t, err)
dialer := rpc.NewDefaultDialer(tlsOptions)
planet.Start(ctx)
defer ctx.Check(planet.Shutdown)
conn, err := dialer.DialNode(ctx, &target.Node)
assert.NotNil(t, conn)
assert.NoError(t, err)
target := planet.StorageNodes[1].Local()
testidentity.CompleteIdentityVersionsTest(t, func(t *testing.T, version storj.IDVersion, ident *identity.FullIdentity) {
tlsOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{
PeerIDVersions: "*",
}, nil)
require.NoError(t, err)
dialer := rpc.NewDefaultDialer(tlsOptions)
conn, err := dialer.DialNode(ctx, &target.Node)
assert.NotNil(t, conn)
assert.NoError(t, err)
assert.NoError(t, conn.Close())
assert.NoError(t, conn.Close())
})
})
}


@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/tlsopts"
@@ -39,200 +38,180 @@ func TestRPCBuild(t *testing.T) {
}
func TestDialNode(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 0, StorageNodeCount: 2, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
whitelistPath, err := planet.WriteWhitelist(storj.LatestIDVersion())
require.NoError(t, err)
planet, err := testplanet.New(t, 0, 2, 0)
if err != nil {
t.Fatal(err)
}
defer ctx.Check(planet.Shutdown)
unsignedIdent, err := testidentity.PregeneratedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
whitelistPath, err := planet.WriteWhitelist(storj.LatestIDVersion())
require.NoError(t, err)
signedIdent, err := testidentity.PregeneratedSignedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
planet.Start(ctx)
tlsOptions, err := tlsopts.NewOptions(signedIdent, tlsopts.Config{
UsePeerCAWhitelist: true,
PeerCAWhitelistPath: whitelistPath,
PeerIDVersions: "*",
}, nil)
require.NoError(t, err)
unsignedIdent, err := testidentity.PregeneratedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
dialer := rpc.NewDefaultDialer(tlsOptions)
signedIdent, err := testidentity.PregeneratedSignedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
unsignedClientOpts, err := tlsopts.NewOptions(unsignedIdent, tlsopts.Config{
PeerIDVersions: "*",
}, nil)
require.NoError(t, err)
tlsOptions, err := tlsopts.NewOptions(signedIdent, tlsopts.Config{
UsePeerCAWhitelist: true,
PeerCAWhitelistPath: whitelistPath,
PeerIDVersions: "*",
}, nil)
require.NoError(t, err)
unsignedDialer := rpc.NewDefaultDialer(unsignedClientOpts)
dialer := rpc.NewDefaultDialer(tlsOptions)
unsignedClientOpts, err := tlsopts.NewOptions(unsignedIdent, tlsopts.Config{
PeerIDVersions: "*",
}, nil)
require.NoError(t, err)
unsignedDialer := rpc.NewDefaultDialer(unsignedClientOpts)
t.Run("DialNode with invalid targets", func(t *testing.T) {
targets := []*pb.Node{
{
Id: storj.NodeID{},
Address: nil,
},
{
Id: storj.NodeID{},
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
t.Run("DialNode with invalid targets", func(t *testing.T) {
targets := []*pb.Node{
{
Id: storj.NodeID{},
Address: nil,
},
},
{
Id: storj.NodeID{123},
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: "127.0.0.1:100",
{
Id: storj.NodeID{},
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
},
},
},
{
Id: storj.NodeID{},
{
Id: storj.NodeID{123},
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: "127.0.0.1:100",
},
},
{
Id: storj.NodeID{},
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
},
}
for _, target := range targets {
tag := fmt.Sprintf("%+v", target)
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialNode(timedCtx, target)
cancel()
assert.Error(t, err, tag)
assert.Nil(t, conn, tag)
}
})
t.Run("DialNode with valid signed target", func(t *testing.T) {
target := &pb.Node{
Id: planet.StorageNodes[1].ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
},
}
for _, target := range targets {
tag := fmt.Sprintf("%+v", target)
}
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialNode(timedCtx, target)
cancel()
assert.Error(t, err, tag)
assert.Nil(t, conn, tag)
}
})
t.Run("DialNode with valid signed target", func(t *testing.T) {
target := &pb.Node{
Id: planet.StorageNodes[1].ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
}
assert.NoError(t, err)
require.NotNil(t, conn)
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialNode(timedCtx, target)
cancel()
assert.NoError(t, conn.Close())
})
assert.NoError(t, err)
require.NotNil(t, conn)
t.Run("DialNode with unsigned identity", func(t *testing.T) {
target := &pb.Node{
Id: planet.StorageNodes[1].ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
}
assert.NoError(t, conn.Close())
})
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := unsignedDialer.DialNode(timedCtx, target)
cancel()
t.Run("DialNode with unsigned identity", func(t *testing.T) {
target := &pb.Node{
Id: planet.StorageNodes[1].ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
}
assert.NotNil(t, conn)
require.NoError(t, err)
assert.NoError(t, conn.Close())
})
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := unsignedDialer.DialNode(timedCtx, target)
cancel()
t.Run("DialAddress with unsigned identity", func(t *testing.T) {
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
cancel()
assert.NotNil(t, conn)
require.NoError(t, err)
assert.NoError(t, conn.Close())
})
assert.NotNil(t, conn)
require.NoError(t, err)
assert.NoError(t, conn.Close())
})
t.Run("DialAddress with unsigned identity", func(t *testing.T) {
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
cancel()
t.Run("DialAddress with valid address", func(t *testing.T) {
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
cancel()
assert.NotNil(t, conn)
require.NoError(t, err)
assert.NoError(t, conn.Close())
})
t.Run("DialAddress with valid address", func(t *testing.T) {
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
cancel()
assert.NoError(t, err)
require.NotNil(t, conn)
assert.NoError(t, conn.Close())
assert.NoError(t, err)
require.NotNil(t, conn)
assert.NoError(t, conn.Close())
})
})
}
func TestDialNode_BadServerCertificate(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 0, StorageNodeCount: 2, UplinkCount: 0,
Reconfigure: testplanet.DisablePeerCAWhitelist,
Identities: testidentity.NewPregeneratedIdentities(storj.LatestIDVersion()),
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
planet, err := testplanet.NewCustom(
zaptest.NewLogger(t),
testplanet.Config{
SatelliteCount: 0,
StorageNodeCount: 2,
UplinkCount: 0,
Reconfigure: testplanet.DisablePeerCAWhitelist,
Identities: testidentity.NewPregeneratedIdentities(storj.LatestIDVersion()),
},
)
if err != nil {
t.Fatal(err)
}
defer ctx.Check(planet.Shutdown)
whitelistPath, err := planet.WriteWhitelist(storj.LatestIDVersion())
require.NoError(t, err)
whitelistPath, err := planet.WriteWhitelist(storj.LatestIDVersion())
require.NoError(t, err)
ident, err := testidentity.PregeneratedSignedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
planet.Start(ctx)
tlsOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{
UsePeerCAWhitelist: true,
PeerCAWhitelistPath: whitelistPath,
}, nil)
require.NoError(t, err)
ident, err := testidentity.PregeneratedSignedIdentity(0, storj.LatestIDVersion())
require.NoError(t, err)
dialer := rpc.NewDefaultDialer(tlsOptions)
tlsOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{
UsePeerCAWhitelist: true,
PeerCAWhitelistPath: whitelistPath,
}, nil)
require.NoError(t, err)
t.Run("DialNode with bad server certificate", func(t *testing.T) {
target := &pb.Node{
Id: planet.StorageNodes[1].ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
}
dialer := rpc.NewDefaultDialer(tlsOptions)
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialNode(timedCtx, target)
cancel()
t.Run("DialNode with bad server certificate", func(t *testing.T) {
target := &pb.Node{
Id: planet.StorageNodes[1].ID(),
Address: &pb.NodeAddress{
Transport: pb.NodeTransport_TCP_TLS_GRPC,
Address: planet.StorageNodes[1].Addr(),
},
}
tag := fmt.Sprintf("%+v", target)
assert.Nil(t, conn, tag)
require.Error(t, err, tag)
assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
})
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialNode(timedCtx, target)
cancel()
t.Run("DialAddress with bad server certificate", func(t *testing.T) {
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialAddressID(timedCtx, planet.StorageNodes[1].Addr(), planet.StorageNodes[1].ID())
cancel()
tag := fmt.Sprintf("%+v", target)
assert.Nil(t, conn, tag)
require.Error(t, err, tag)
assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
})
t.Run("DialAddress with bad server certificate", func(t *testing.T) {
timedCtx, cancel := context.WithTimeout(ctx, time.Second)
conn, err := dialer.DialAddressID(timedCtx, planet.StorageNodes[1].Addr(), planet.StorageNodes[1].ID())
cancel()
assert.Nil(t, conn)
require.Error(t, err)
assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
assert.Nil(t, conn)
require.Error(t, err)
assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
})
})
}


@@ -18,48 +18,41 @@ import (
)
func TestBasic(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
test := func(version storj.IDVersion) {
planet, err := testplanet.NewWithIdentityVersion(t, &version, 2, 4, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
for _, satellite := range planet.Satellites {
t.Log("SATELLITE", satellite.ID(), satellite.Addr())
}
for _, storageNode := range planet.StorageNodes {
t.Log("STORAGE", storageNode.ID(), storageNode.Addr())
}
for _, uplink := range planet.Uplinks {
t.Log("UPLINK", uplink.ID(), uplink.Addr())
}
for _, sat := range planet.Satellites {
satellite := sat.Local().Node
for _, sn := range planet.StorageNodes {
node := sn.Local()
conn, err := sn.Dialer.DialNode(ctx, &satellite)
require.NoError(t, err)
defer ctx.Check(conn.Close)
_, err = conn.NodeClient().CheckIn(ctx, &pb.CheckInRequest{
Address: node.GetAddress().GetAddress(),
Version: &node.Version,
Capacity: &node.Capacity,
Operator: &node.Operator,
})
require.NoError(t, err)
}
}
// wait a bit to see whether some failures occur
time.Sleep(time.Second)
}
for _, version := range storj.IDVersions {
test(version)
version := version
testplanet.Run(t, testplanet.Config{
SatelliteCount: 2, StorageNodeCount: 4, UplinkCount: 1,
IdentityVersion: &version,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
for _, satellite := range planet.Satellites {
t.Log("SATELLITE", satellite.ID(), satellite.Addr())
}
for _, storageNode := range planet.StorageNodes {
t.Log("STORAGE", storageNode.ID(), storageNode.Addr())
}
for _, uplink := range planet.Uplinks {
t.Log("UPLINK", uplink.ID(), uplink.Addr())
}
for _, sat := range planet.Satellites {
satellite := sat.Local().Node
for _, sn := range planet.StorageNodes {
node := sn.Local()
conn, err := sn.Dialer.DialNode(ctx, &satellite)
require.NoError(t, err)
defer ctx.Check(conn.Close)
_, err = conn.NodeClient().CheckIn(ctx, &pb.CheckInRequest{
Address: node.GetAddress().GetAddress(),
Version: &node.Version,
Capacity: &node.Capacity,
Operator: &node.Operator,
})
require.NoError(t, err)
}
}
// wait a bit to see whether some failures occur
time.Sleep(time.Second)
})
}
}


@@ -34,159 +34,149 @@ import (
)
func TestInvalidAPIKey(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 0, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
throwawayKey, err := macaroon.NewAPIKey([]byte("secret"))
require.NoError(t, err)
planet.Start(ctx)
for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
throwawayKey, err := macaroon.NewAPIKey([]byte("secret"))
require.NoError(t, err)
defer ctx.Check(client.Close)
client.SetRawAPIKey([]byte(invalidAPIKey))
for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
require.NoError(t, err)
defer ctx.Check(client.Close)
_, _, _, err = client.CreateSegmentOld(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, false)
client.SetRawAPIKey([]byte(invalidAPIKey))
_, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
assertUnauthenticated(t, err, false)
_, _, _, err = client.CreateSegmentOld(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, false)
_, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
assertUnauthenticated(t, err, false)
_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.ListSegmentsOld(ctx, "testbucket", "", "", "", true, 1, 0)
assertUnauthenticated(t, err, false)
}
_, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.ListSegmentsOld(ctx, "testbucket", "", "", "", true, 1, 0)
assertUnauthenticated(t, err, false)
}
})
}
func TestRestrictedAPIKey(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
key := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
planet, err := testplanet.New(t, 1, 0, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
key := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
tests := []struct {
Caveat macaroon.Caveat
CreateSegmentAllowed bool
CommitSegmentAllowed bool
SegmentInfoAllowed bool
ReadSegmentAllowed bool
DeleteSegmentAllowed bool
ListSegmentsAllowed bool
ReadBucketAllowed bool
}{
{ // Everything disallowed
Caveat: macaroon.Caveat{
DisallowReads: true,
DisallowWrites: true,
DisallowLists: true,
DisallowDeletes: true,
tests := []struct {
Caveat macaroon.Caveat
CreateSegmentAllowed bool
CommitSegmentAllowed bool
SegmentInfoAllowed bool
ReadSegmentAllowed bool
DeleteSegmentAllowed bool
ListSegmentsAllowed bool
ReadBucketAllowed bool
}{
{ // Everything disallowed
Caveat: macaroon.Caveat{
DisallowReads: true,
DisallowWrites: true,
DisallowLists: true,
DisallowDeletes: true,
},
ReadBucketAllowed: true,
},
ReadBucketAllowed: true,
},
{ // Read only
Caveat: macaroon.Caveat{
DisallowWrites: true,
DisallowDeletes: true,
{ // Read only
Caveat: macaroon.Caveat{
DisallowWrites: true,
DisallowDeletes: true,
},
SegmentInfoAllowed: true,
ReadSegmentAllowed: true,
ListSegmentsAllowed: true,
ReadBucketAllowed: true,
},
SegmentInfoAllowed: true,
ReadSegmentAllowed: true,
ListSegmentsAllowed: true,
ReadBucketAllowed: true,
},
{ // Write only
Caveat: macaroon.Caveat{
DisallowReads: true,
DisallowLists: true,
{ // Write only
Caveat: macaroon.Caveat{
DisallowReads: true,
DisallowLists: true,
},
CreateSegmentAllowed: true,
CommitSegmentAllowed: true,
DeleteSegmentAllowed: true,
ReadBucketAllowed: true,
},
CreateSegmentAllowed: true,
CommitSegmentAllowed: true,
DeleteSegmentAllowed: true,
ReadBucketAllowed: true,
},
{ // Bucket restriction
Caveat: macaroon.Caveat{
AllowedPaths: []*macaroon.Caveat_Path{{
Bucket: []byte("otherbucket"),
}},
{ // Bucket restriction
Caveat: macaroon.Caveat{
AllowedPaths: []*macaroon.Caveat_Path{{
Bucket: []byte("otherbucket"),
}},
},
},
},
{ // Path restriction
Caveat: macaroon.Caveat{
AllowedPaths: []*macaroon.Caveat_Path{{
Bucket: []byte("testbucket"),
EncryptedPathPrefix: []byte("otherpath"),
}},
{ // Path restriction
Caveat: macaroon.Caveat{
AllowedPaths: []*macaroon.Caveat_Path{{
Bucket: []byte("testbucket"),
EncryptedPathPrefix: []byte("otherpath"),
}},
},
ReadBucketAllowed: true,
},
ReadBucketAllowed: true,
},
{ // Time restriction after
Caveat: macaroon.Caveat{
NotAfter: func(x time.Time) *time.Time { return &x }(time.Now()),
{ // Time restriction after
Caveat: macaroon.Caveat{
NotAfter: func(x time.Time) *time.Time { return &x }(time.Now()),
},
},
},
{ // Time restriction before
Caveat: macaroon.Caveat{
NotBefore: func(x time.Time) *time.Time { return &x }(time.Now().Add(time.Hour)),
{ // Time restriction before
Caveat: macaroon.Caveat{
NotBefore: func(x time.Time) *time.Time { return &x }(time.Now().Add(time.Hour)),
},
},
},
}
}
for _, test := range tests {
restrictedKey, err := key.Restrict(test.Caveat)
require.NoError(t, err)
for _, test := range tests {
restrictedKey, err := key.Restrict(test.Caveat)
require.NoError(t, err)
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey)
require.NoError(t, err)
defer ctx.Check(client.Close)
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey)
require.NoError(t, err)
defer ctx.Check(client.Close)
_, _, _, err = client.CreateSegmentOld(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, test.CreateSegmentAllowed)
_, _, _, err = client.CreateSegmentOld(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, test.CreateSegmentAllowed)
_, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
assertUnauthenticated(t, err, test.CommitSegmentAllowed)
_, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
assertUnauthenticated(t, err, test.CommitSegmentAllowed)
_, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.SegmentInfoAllowed)
_, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.SegmentInfoAllowed)
_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.ReadSegmentAllowed)
_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.ReadSegmentAllowed)
_, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.DeleteSegmentAllowed)
_, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.DeleteSegmentAllowed)
_, _, err = client.ListSegmentsOld(ctx, "testbucket", "testpath", "", "", true, 1, 0)
assertUnauthenticated(t, err, test.ListSegmentsAllowed)
_, _, err = client.ListSegmentsOld(ctx, "testbucket", "testpath", "", "", true, 1, 0)
assertUnauthenticated(t, err, test.ListSegmentsAllowed)
_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "", -1)
assertUnauthenticated(t, err, test.ReadBucketAllowed)
}
_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "", -1)
assertUnauthenticated(t, err, test.ReadBucketAllowed)
}
})
}
func assertUnauthenticated(t *testing.T, err error, allowed bool) {
@@ -200,79 +190,75 @@ func assertUnauthenticated(t *testing.T, err error, allowed bool) {
}
func TestServiceList(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
planet, err := testplanet.New(t, 1, 0, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
items := []struct {
Key string
Value []byte
}{
{Key: "sample.😶", Value: []byte{1}},
{Key: "müsic", Value: []byte{2}},
{Key: "müsic/söng1.mp3", Value: []byte{3}},
{Key: "müsic/söng2.mp3", Value: []byte{4}},
{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
{Key: "müsic/söng4.mp3", Value: []byte{6}},
{Key: "ビデオ/movie.mkv", Value: []byte{7}},
}
planet.Start(ctx)
for _, item := range items {
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", item.Key, item.Value)
assert.NoError(t, err)
}
items := []struct {
Key string
Value []byte
}{
{Key: "sample.😶", Value: []byte{1}},
{Key: "müsic", Value: []byte{2}},
{Key: "müsic/söng1.mp3", Value: []byte{3}},
{Key: "müsic/söng2.mp3", Value: []byte{4}},
{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
{Key: "müsic/söng4.mp3", Value: []byte{6}},
{Key: "ビデオ/movie.mkv", Value: []byte{7}},
}
config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
defer ctx.Check(project.Close)
list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
require.NoError(t, err)
for _, item := range items {
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", item.Key, item.Value)
assert.NoError(t, err)
}
expected := []storj.Object{
{Path: "müsic"},
{Path: "müsic/album/söng3.mp3"},
{Path: "müsic/söng1.mp3"},
{Path: "müsic/söng2.mp3"},
{Path: "müsic/söng4.mp3"},
{Path: "sample.😶"},
{Path: "ビデオ/movie.mkv"},
}
config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
defer ctx.Check(project.Close)
list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
require.NoError(t, err)
require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
})
for i, item := range expected {
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
}
expected := []storj.Object{
{Path: "müsic"},
{Path: "müsic/album/söng3.mp3"},
{Path: "müsic/söng1.mp3"},
{Path: "müsic/söng2.mp3"},
{Path: "müsic/söng4.mp3"},
{Path: "sample.😶"},
{Path: "ビデオ/movie.mkv"},
}
list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
require.NoError(t, err)
require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
expected = []storj.Object{
{Path: "müsic"},
{Path: "müsic/", IsPrefix: true},
{Path: "sample.😶"},
{Path: "ビデオ/", IsPrefix: true},
}
require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
})
for i, item := range expected {
t.Log(item.Path, list.Items[i].Path)
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
}
})
for i, item := range expected {
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
}
list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
require.NoError(t, err)
expected = []storj.Object{
{Path: "müsic"},
{Path: "müsic/", IsPrefix: true},
{Path: "sample.😶"},
{Path: "ビデオ/", IsPrefix: true},
}
require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
})
for i, item := range expected {
t.Log(item.Path, list.Items[i].Path)
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
}
}
func TestCommitSegment(t *testing.T) {


@@ -15,89 +15,74 @@ import (
)
func TestGetNonExitingSatellites(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 3, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
totalSatelliteCount := len(planet.Satellites)
exitingSatelliteCount := 1
exitingSatellite := planet.Satellites[0]
storagenode := planet.StorageNodes[0]
totalSatelliteCount := 3
exitingSatelliteCount := 1
planet, err := testplanet.New(t, totalSatelliteCount, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
// set a satellite to already be exiting
err := storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now().UTC(), 0)
require.NoError(t, err)
planet.Start(ctx)
exitingSatellite := planet.Satellites[0]
storagenode := planet.StorageNodes[0]
nonExitingSatellites, err := storagenode.GracefulExit.Endpoint.GetNonExitingSatellites(ctx, &pb.GetNonExitingSatellitesRequest{})
require.NoError(t, err)
require.Len(t, nonExitingSatellites.GetSatellites(), totalSatelliteCount-exitingSatelliteCount)
// set a satellite to already be exiting
err = storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now().UTC(), 0)
require.NoError(t, err)
nonExitingSatellites, err := storagenode.GracefulExit.Endpoint.GetNonExitingSatellites(ctx, &pb.GetNonExitingSatellitesRequest{})
require.NoError(t, err)
require.Len(t, nonExitingSatellites.GetSatellites(), totalSatelliteCount-exitingSatelliteCount)
for _, satellite := range nonExitingSatellites.GetSatellites() {
require.NotEqual(t, exitingSatellite.ID(), satellite.NodeId)
}
for _, satellite := range nonExitingSatellites.GetSatellites() {
require.NotEqual(t, exitingSatellite.ID(), satellite.NodeId)
}
})
}
func TestInitiateGracefulExit(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 3, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
storagenode := planet.StorageNodes[0]
exitingSatelliteID := planet.Satellites[0].ID()
totalSatelliteCount := 3
planet, err := testplanet.New(t, totalSatelliteCount, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
req := &pb.InitiateGracefulExitRequest{
NodeId: exitingSatelliteID,
}
planet.Start(ctx)
storagenode := planet.StorageNodes[0]
resp, err := storagenode.GracefulExit.Endpoint.InitiateGracefulExit(ctx, req)
require.NoError(t, err)
// check progress is 0
require.EqualValues(t, 0, resp.GetPercentComplete())
require.False(t, resp.GetSuccessful())
exitingSatelliteID := planet.Satellites[0].ID()
req := &pb.InitiateGracefulExitRequest{
NodeId: exitingSatelliteID,
}
resp, err := storagenode.GracefulExit.Endpoint.InitiateGracefulExit(ctx, req)
require.NoError(t, err)
// check progress is 0
require.EqualValues(t, 0, resp.GetPercentComplete())
require.False(t, resp.GetSuccessful())
exitStatuses, err := storagenode.DB.Satellites().ListGracefulExits(ctx)
require.NoError(t, err)
require.Len(t, exitStatuses, 1)
require.Equal(t, exitingSatelliteID, exitStatuses[0].SatelliteID)
exitStatuses, err := storagenode.DB.Satellites().ListGracefulExits(ctx)
require.NoError(t, err)
require.Len(t, exitStatuses, 1)
require.Equal(t, exitingSatelliteID, exitStatuses[0].SatelliteID)
})
}
func TestGetExitProgress(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 3, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
exitingSatellite := planet.Satellites[0]
storagenode := planet.StorageNodes[0]
totalSatelliteCount := 3
planet, err := testplanet.New(t, totalSatelliteCount, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
// start graceful exit
err := storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now().UTC(), 100)
require.NoError(t, err)
err = storagenode.DB.Satellites().UpdateGracefulExit(ctx, exitingSatellite.ID(), 20)
require.NoError(t, err)
planet.Start(ctx)
exitingSatellite := planet.Satellites[0]
storagenode := planet.StorageNodes[0]
// start graceful exit
err = storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now().UTC(), 100)
require.NoError(t, err)
err = storagenode.DB.Satellites().UpdateGracefulExit(ctx, exitingSatellite.ID(), 20)
require.NoError(t, err)
// check graceful exit progress
resp, err := storagenode.GracefulExit.Endpoint.GetExitProgress(ctx, &pb.GetExitProgressRequest{})
require.NoError(t, err)
require.Len(t, resp.GetProgress(), 1)
progress := resp.GetProgress()[0]
require.Equal(t, progress.GetDomainName(), exitingSatellite.Addr())
require.Equal(t, progress.NodeId, exitingSatellite.ID())
require.EqualValues(t, 20, progress.GetPercentComplete())
require.False(t, progress.GetSuccessful())
require.Empty(t, progress.GetCompletionReceipt())
// check graceful exit progress
resp, err := storagenode.GracefulExit.Endpoint.GetExitProgress(ctx, &pb.GetExitProgressRequest{})
require.NoError(t, err)
require.Len(t, resp.GetProgress(), 1)
progress := resp.GetProgress()[0]
require.Equal(t, progress.GetDomainName(), exitingSatellite.Addr())
require.Equal(t, progress.NodeId, exitingSatellite.ID())
require.EqualValues(t, 20, progress.GetPercentComplete())
require.False(t, progress.GetSuccessful())
require.Empty(t, progress.GetCompletionReceipt())
})
}


@@ -20,86 +20,81 @@ import (
)
func TestInspectorStats(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 5, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
var availableBandwidth int64
var availableSpace int64
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
require.NoError(t, err)
assert.Zero(t, response.UsedBandwidth)
assert.Zero(t, response.UsedSpace)
assert.Zero(t, response.UsedEgress)
assert.Zero(t, response.UsedIngress)
assert.True(t, response.AvailableBandwidth > 0)
assert.True(t, response.AvailableSpace > 0)
// assume that all storage node should have the same initial values
availableBandwidth = response.AvailableBandwidth
availableSpace = response.AvailableSpace
}
expectedData := testrand.Bytes(100 * memory.KiB)
rs := &uplink.RSConfig{
MinThreshold: 2,
RepairThreshold: 3,
SuccessThreshold: 4,
MaxThreshold: 5,
}
err = planet.Uplinks[0].UploadWithConfig(ctx, planet.Satellites[0], rs, "testbucket", "test/path", expectedData)
require.NoError(t, err)
_, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "test/path")
assert.NoError(t, err)
// wait until all requests have been handled
for {
total := int32(0)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var availableBandwidth int64
var availableSpace int64
for _, storageNode := range planet.StorageNodes {
total += storageNode.Storage2.Endpoint.TestLiveRequestCount()
}
if total == 0 {
break
response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
require.NoError(t, err)
assert.Zero(t, response.UsedBandwidth)
assert.Zero(t, response.UsedSpace)
assert.Zero(t, response.UsedEgress)
assert.Zero(t, response.UsedIngress)
assert.True(t, response.AvailableBandwidth > 0)
assert.True(t, response.AvailableSpace > 0)
// assume that all storage node should have the same initial values
availableBandwidth = response.AvailableBandwidth
availableSpace = response.AvailableSpace
}
sync2.Sleep(ctx, 100*time.Millisecond)
}
expectedData := testrand.Bytes(100 * memory.KiB)
var downloaded int
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
rs := &uplink.RSConfig{
MinThreshold: 2,
RepairThreshold: 3,
SuccessThreshold: 4,
MaxThreshold: 5,
}
err := planet.Uplinks[0].UploadWithConfig(ctx, planet.Satellites[0], rs, "testbucket", "test/path", expectedData)
require.NoError(t, err)
// TODO set more accurate assertions
if response.UsedSpace > 0 {
assert.NotZero(t, response.UsedBandwidth)
assert.Equal(t, response.UsedBandwidth, response.UsedIngress+response.UsedEgress)
assert.Equal(t, availableBandwidth-response.UsedBandwidth, response.AvailableBandwidth)
assert.Equal(t, availableSpace-response.UsedSpace, response.AvailableSpace)
_, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "test/path")
assert.NoError(t, err)
assert.Equal(t, response.UsedSpace, response.UsedBandwidth-response.UsedEgress)
if response.UsedEgress > 0 {
downloaded++
assert.Equal(t, response.UsedBandwidth-response.UsedIngress, response.UsedEgress)
// wait until all requests have been handled
for {
total := int32(0)
for _, storageNode := range planet.StorageNodes {
total += storageNode.Storage2.Endpoint.TestLiveRequestCount()
}
} else {
assert.Zero(t, response.UsedSpace)
// TODO track why this is failing
//assert.Equal(t, availableBandwidth, response.AvailableBandwidth)
assert.Equal(t, availableSpace, response.AvailableSpace)
if total == 0 {
break
}
sync2.Sleep(ctx, 100*time.Millisecond)
}
}
assert.True(t, downloaded >= rs.MinThreshold, "downloaded=%v, rs.MinThreshold=%v", downloaded, rs.MinThreshold)
var downloaded int
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
require.NoError(t, err)
// TODO set more accurate assertions
if response.UsedSpace > 0 {
assert.NotZero(t, response.UsedBandwidth)
assert.Equal(t, response.UsedBandwidth, response.UsedIngress+response.UsedEgress)
assert.Equal(t, availableBandwidth-response.UsedBandwidth, response.AvailableBandwidth)
assert.Equal(t, availableSpace-response.UsedSpace, response.AvailableSpace)
assert.Equal(t, response.UsedSpace, response.UsedBandwidth-response.UsedEgress)
if response.UsedEgress > 0 {
downloaded++
assert.Equal(t, response.UsedBandwidth-response.UsedIngress, response.UsedEgress)
}
} else {
assert.Zero(t, response.UsedSpace)
// TODO track why this is failing
//assert.Equal(t, availableBandwidth, response.AvailableBandwidth)
assert.Equal(t, availableSpace, response.AvailableSpace)
}
}
assert.True(t, downloaded >= rs.MinThreshold, "downloaded=%v, rs.MinThreshold=%v", downloaded, rs.MinThreshold)
})
}
func TestInspectorDashboard(t *testing.T) {


@@ -90,352 +90,328 @@ func TestUploadAndPartialDownload(t *testing.T) {
}
func TestUpload(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
for _, tt := range []struct {
pieceID storj.PieceID
contentLength memory.Size
action pb.PieceAction
err string
}{
{ // should successfully store data
pieceID: storj.PieceID{1},
contentLength: 50 * memory.KiB,
action: pb.PieceAction_PUT,
err: "",
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{},
contentLength: 1 * memory.KiB,
action: pb.PieceAction_PUT,
err: "missing piece id",
},
{ // should err because invalid action
pieceID: storj.PieceID{1},
contentLength: 1 * memory.KiB,
action: pb.PieceAction_GET,
err: "expected put or put repair action got GET",
},
} {
data := testrand.Bytes(tt.contentLength)
serialNumber := testrand.SerialNumber()
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
serialNumber,
24*time.Hour,
24*time.Hour,
int64(len(data)),
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
for _, tt := range []struct {
pieceID storj.PieceID
contentLength memory.Size
action pb.PieceAction
err string
}{
{ // should successfully store data
pieceID: storj.PieceID{1},
contentLength: 50 * memory.KiB,
action: pb.PieceAction_PUT,
err: "",
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{},
contentLength: 1 * memory.KiB,
action: pb.PieceAction_PUT,
err: "missing piece id",
},
{ // should err because invalid action
pieceID: storj.PieceID{1},
contentLength: 1 * memory.KiB,
action: pb.PieceAction_GET,
err: "expected put or put repair action got GET",
},
} {
data := testrand.Bytes(tt.contentLength)
serialNumber := testrand.SerialNumber()
_, err = uploader.Write(data)
require.NoError(t, err)
pieceHash, err := uploader.Commit(ctx)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
serialNumber,
24*time.Hour,
24*time.Hour,
int64(len(data)),
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
expectedHash := pkcrypto.SHA256Hash(data)
assert.Equal(t, expectedHash, pieceHash.Hash)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
signee := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
require.NoError(t, signing.VerifyPieceHashSignature(ctx, signee, pieceHash))
_, err = uploader.Write(data)
require.NoError(t, err)
pieceHash, err := uploader.Commit(ctx)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
expectedHash := pkcrypto.SHA256Hash(data)
assert.Equal(t, expectedHash, pieceHash.Hash)
signee := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
require.NoError(t, signing.VerifyPieceHashSignature(ctx, signee, pieceHash))
}
}
}
})
}
func TestDownload(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
pieceID := storj.PieceID{1}
expectedData, _, _ := uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
planet, err := testplanet.New(t, 1, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
for _, tt := range []struct {
pieceID storj.PieceID
action pb.PieceAction
errs []string
}{
{ // should successfully download data
pieceID: pieceID,
action: pb.PieceAction_GET,
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{},
action: pb.PieceAction_GET,
errs: []string{"missing piece id"},
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{2},
action: pb.PieceAction_GET,
errs: []string{"file does not exist", "The system cannot find the path specified"},
},
{ // should err with invalid action
pieceID: pieceID,
action: pb.PieceAction_PUT,
errs: []string{"expected get or get repair or audit action got PUT"},
},
} {
serialNumber := testrand.SerialNumber()
planet.Start(ctx)
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
serialNumber,
24*time.Hour,
24*time.Hour,
int64(len(expectedData)),
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
pieceID := storj.PieceID{1}
expectedData, _, _ := uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, int64(len(expectedData)))
require.NoError(t, err)
buffer := make([]byte, len(expectedData))
n, err := downloader.Read(buffer)
if len(tt.errs) > 0 {
} else {
require.NoError(t, err)
require.Equal(t, expectedData, buffer[:n])
}
err = downloader.Close()
if len(tt.errs) > 0 {
require.Error(t, err)
require.True(t, strings.Contains(err.Error(), tt.errs[0]) || strings.Contains(err.Error(), tt.errs[1]), err.Error())
} else {
require.NoError(t, err)
}
// these should only be not-nil if action = pb.PieceAction_GET_REPAIR
hash, originalLimit := downloader.GetHashAndLimit()
require.Nil(t, hash)
require.Nil(t, originalLimit)
}
})
}
func TestDownloadGetRepair(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
pieceID := storj.PieceID{1}
expectedData, ulOrderLimit, originHash := uploadPiece(
t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0],
)
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
for _, tt := range []struct {
pieceID storj.PieceID
action pb.PieceAction
errs []string
}{
{ // should successfully download data
pieceID: pieceID,
action: pb.PieceAction_GET,
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{},
action: pb.PieceAction_GET,
errs: []string{"missing piece id"},
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{2},
action: pb.PieceAction_GET,
errs: []string{"file does not exist", "The system cannot find the path specified"},
},
{ // should err with invalid action
pieceID: pieceID,
action: pb.PieceAction_PUT,
errs: []string{"expected get or get repair or audit action got PUT"},
},
} {
serialNumber := testrand.SerialNumber()
orderLimit, piecePrivateKey := GenerateOrderLimit(
dlOrderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
storj.PieceID{1},
pb.PieceAction_GET_REPAIR,
serialNumber,
24*time.Hour,
24*time.Hour,
int64(len(expectedData)),
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
dlOrderLimit, err = signing.SignOrderLimit(ctx, signer, dlOrderLimit)
require.NoError(t, err)
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, int64(len(expectedData)))
downloader, err := client.Download(ctx, dlOrderLimit, piecePrivateKey, 0, int64(len(expectedData)))
require.NoError(t, err)
buffer := make([]byte, len(expectedData))
n, err := downloader.Read(buffer)
if len(tt.errs) > 0 {
} else {
require.NoError(t, err)
require.Equal(t, expectedData, buffer[:n])
}
require.NoError(t, err)
require.Equal(t, expectedData, buffer[:n])
err = downloader.Close()
if len(tt.errs) > 0 {
require.Error(t, err)
require.True(t, strings.Contains(err.Error(), tt.errs[0]) || strings.Contains(err.Error(), tt.errs[1]), err.Error())
} else {
require.NoError(t, err)
}
require.NoError(t, err)
// these should only be not-nil if action = pb.PieceAction_GET_REPAIR
hash, originalLimit := downloader.GetHashAndLimit()
require.Nil(t, hash)
require.Nil(t, originalLimit)
}
}
hash, originLimit := downloader.GetHashAndLimit()
require.NotNil(t, hash)
require.Equal(t, originHash.Hash, hash.Hash)
require.Equal(t, originHash.PieceId, hash.PieceId)
func TestDownloadGetRepair(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
pieceID := storj.PieceID{1}
expectedData, ulOrderLimit, originHash := uploadPiece(
t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0],
)
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
serialNumber := testrand.SerialNumber()
dlOrderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_GET_REPAIR,
serialNumber,
24*time.Hour,
24*time.Hour,
int64(len(expectedData)),
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
dlOrderLimit, err = signing.SignOrderLimit(ctx, signer, dlOrderLimit)
require.NoError(t, err)
downloader, err := client.Download(ctx, dlOrderLimit, piecePrivateKey, 0, int64(len(expectedData)))
require.NoError(t, err)
buffer := make([]byte, len(expectedData))
n, err := downloader.Read(buffer)
require.NoError(t, err)
require.Equal(t, expectedData, buffer[:n])
err = downloader.Close()
require.NoError(t, err)
hash, originLimit := downloader.GetHashAndLimit()
require.NotNil(t, hash)
require.Equal(t, originHash.Hash, hash.Hash)
require.Equal(t, originHash.PieceId, hash.PieceId)
require.NotNil(t, originLimit)
require.Equal(t, originLimit.Action, ulOrderLimit.Action)
require.Equal(t, originLimit.Limit, ulOrderLimit.Limit)
require.Equal(t, originLimit.PieceId, ulOrderLimit.PieceId)
require.Equal(t, originLimit.SatelliteId, ulOrderLimit.SatelliteId)
require.Equal(t, originLimit.SerialNumber, ulOrderLimit.SerialNumber)
require.Equal(t, originLimit.SatelliteSignature, ulOrderLimit.SatelliteSignature)
require.Equal(t, originLimit.UplinkPublicKey, ulOrderLimit.UplinkPublicKey)
require.NotNil(t, originLimit)
require.Equal(t, originLimit.Action, ulOrderLimit.Action)
require.Equal(t, originLimit.Limit, ulOrderLimit.Limit)
require.Equal(t, originLimit.PieceId, ulOrderLimit.PieceId)
require.Equal(t, originLimit.SatelliteId, ulOrderLimit.SatelliteId)
require.Equal(t, originLimit.SerialNumber, ulOrderLimit.SerialNumber)
require.Equal(t, originLimit.SatelliteSignature, ulOrderLimit.SatelliteSignature)
require.Equal(t, originLimit.UplinkPublicKey, ulOrderLimit.UplinkPublicKey)
})
}
func TestDelete(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
pieceID := storj.PieceID{1}
uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
for _, tt := range []struct {
pieceID storj.PieceID
action pb.PieceAction
err string
}{
{ // should successfully delete data
pieceID: pieceID,
action: pb.PieceAction_DELETE,
err: "",
},
{ // should err with piece ID not found
pieceID: storj.PieceID{99},
action: pb.PieceAction_DELETE,
err: "", // TODO should this return error
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{},
action: pb.PieceAction_DELETE,
err: "missing piece id",
},
{ // should err due to incorrect action
pieceID: pieceID,
action: pb.PieceAction_GET,
err: "expected delete action got GET",
},
} {
serialNumber := testrand.SerialNumber()
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
serialNumber,
24*time.Hour,
24*time.Hour,
100,
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
pieceID := storj.PieceID{1}
uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
err := client.Delete(ctx, orderLimit, piecePrivateKey)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
for _, tt := range []struct {
pieceID storj.PieceID
action pb.PieceAction
err string
}{
{ // should successfully delete data
pieceID: pieceID,
action: pb.PieceAction_DELETE,
err: "",
},
{ // should err with piece ID not found
pieceID: storj.PieceID{99},
action: pb.PieceAction_DELETE,
err: "", // TODO should this return error
},
{ // should err with piece ID not specified
pieceID: storj.PieceID{},
action: pb.PieceAction_DELETE,
err: "missing piece id",
},
{ // should err due to incorrect action
pieceID: pieceID,
action: pb.PieceAction_GET,
err: "expected delete action got GET",
},
} {
serialNumber := testrand.SerialNumber()
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
planet.Satellites[0].ID(),
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
serialNumber,
24*time.Hour,
24*time.Hour,
100,
)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
err := client.Delete(ctx, orderLimit, piecePrivateKey)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
}
}
}
})
}
func TestDeletePiece(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
var (
planetSat = planet.Satellites[0]
planetSN = planet.StorageNodes[0]
)
var client *piecestore.Client
{
dossier, err := planetSat.Overlay.DB.Get(ctx.Context, planetSN.ID())
require.NoError(t, err)
client, err = piecestore.Dial(
ctx.Context, planetSat.Dialer, &dossier.Node, zaptest.NewLogger(t), piecestore.Config{},
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var (
planetSat = planet.Satellites[0]
planetSN = planet.StorageNodes[0]
)
require.NoError(t, err)
}
t.Run("Ok", func(t *testing.T) {
pieceID := storj.PieceID{1}
data, _, _ := uploadPiece(t, ctx, pieceID, planetSN, planet.Uplinks[0], planetSat)
var client *piecestore.Client
{
dossier, err := planetSat.Overlay.DB.Get(ctx.Context, planetSN.ID())
require.NoError(t, err)
err := client.DeletePiece(ctx.Context, pieceID)
require.NoError(t, err)
client, err = piecestore.Dial(
ctx.Context, planetSat.Dialer, &dossier.Node, zaptest.NewLogger(t), piecestore.Config{},
)
require.NoError(t, err)
}
_, err = downloadPiece(t, ctx, pieceID, int64(len(data)), planetSN, planet.Uplinks[0], planetSat)
require.Error(t, err)
t.Run("Ok", func(t *testing.T) {
pieceID := storj.PieceID{1}
data, _, _ := uploadPiece(t, ctx, pieceID, planetSN, planet.Uplinks[0], planetSat)
require.Condition(t, func() bool {
return strings.Contains(err.Error(), "file does not exist") ||
strings.Contains(err.Error(), "The system cannot find the path specified")
}, "unexpected error message")
})
err := client.DeletePiece(ctx.Context, pieceID)
require.NoError(t, err)
t.Run("error: Not found", func(t *testing.T) {
err := client.DeletePiece(ctx.Context, storj.PieceID{2})
require.Error(t, err)
require.Equal(t, rpcstatus.NotFound, rpcstatus.Code(err))
})
_, err = downloadPiece(t, ctx, pieceID, int64(len(data)), planetSN, planet.Uplinks[0], planetSat)
require.Error(t, err)
t.Run("error: permission denied", func(t *testing.T) {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planetSN)
require.NoError(t, err)
require.Condition(t, func() bool {
return strings.Contains(err.Error(), "file does not exist") ||
strings.Contains(err.Error(), "The system cannot find the path specified")
}, "unexpected error message")
})
err = client.DeletePiece(ctx.Context, storj.PieceID{})
require.Error(t, err)
require.Equal(t, rpcstatus.PermissionDenied, rpcstatus.Code(err))
t.Run("error: Not found", func(t *testing.T) {
err := client.DeletePiece(ctx.Context, storj.PieceID{2})
require.Error(t, err)
require.Equal(t, rpcstatus.NotFound, rpcstatus.Code(err))
})
t.Run("error: permission denied", func(t *testing.T) {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planetSN)
require.NoError(t, err)
err = client.DeletePiece(ctx.Context, storj.PieceID{})
require.Error(t, err)
require.Equal(t, rpcstatus.PermissionDenied, rpcstatus.Code(err))
})
})
}


@@ -124,31 +124,147 @@ func TestOrderLimitPutValidation(t *testing.T) {
} {
tt := tt
t.Run(tt.testName, func(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
planet, err := testplanet.New(t, 1, 1, 1)
// set desirable bandwidth
setBandwidth(ctx, t, planet, tt.availableBandwidth)
// set desirable space
setSpace(ctx, t, planet, tt.availableSpace)
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
if tt.useUnknownSatellite {
unapprovedSatellite, err := planet.NewIdentity()
require.NoError(t, err)
signer = signing.SignerFromFullIdentity(unapprovedSatellite)
satellite = unapprovedSatellite
}
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
tt.serialNumber,
tt.pieceExpiration,
tt.orderExpiration,
tt.limit,
)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
var writeErr error
buffer := make([]byte, memory.KiB)
for i := 0; i < 10; i++ {
testrand.Read(buffer)
_, writeErr = uploader.Write(buffer)
if writeErr != nil {
break
}
}
_, commitErr := uploader.Commit(ctx)
err = errs.Combine(writeErr, commitErr)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
}
})
})
}
}
func TestOrderLimitGetValidation(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
defaultPieceSize := 10 * memory.KiB
for _, storageNode := range planet.StorageNodes {
err := storageNode.DB.Bandwidth().Add(ctx, planet.Satellites[0].ID(), pb.PieceAction_GET, memory.TB.Int64()-(15*memory.KiB.Int64()), time.Now())
require.NoError(t, err)
}
{ // upload test piece
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_PUT,
storj.SerialNumber{0},
oneWeek,
oneWeek,
defaultPieceSize.Int64(),
)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
data := testrand.Bytes(defaultPieceSize)
_, err = uploader.Write(data)
require.NoError(t, err)
_, err = uploader.Commit(ctx)
require.NoError(t, err)
}
// wait for all requests to finish to ensure that the upload usage has been
// accounted for.
waitForEndpointRequestsToDrain(t, planet)
for _, tt := range []struct {
satellite *identity.FullIdentity
pieceID storj.PieceID
action pb.PieceAction
serialNumber storj.SerialNumber
pieceExpiration time.Duration
orderExpiration time.Duration
limit int64
err string
}{
{ // allocated bandwidth limit
pieceID: storj.PieceID{1},
action: pb.PieceAction_GET,
serialNumber: storj.SerialNumber{1},
pieceExpiration: oneWeek,
orderExpiration: oneWeek,
limit: 10 * memory.KiB.Int64(),
err: "out of bandwidth",
},
} {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
if tt.satellite != nil {
signer = signing.SignerFromFullIdentity(tt.satellite)
satellite = tt.satellite
}
orderLimit, piecePrivateKey := GenerateOrderLimit(
@ -166,145 +282,21 @@ func TestOrderLimitPutValidation(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, tt.limit)
require.NoError(t, err)
buffer, readErr := ioutil.ReadAll(downloader)
closeErr := downloader.Close()
err = errs.Combine(readErr, closeErr)
if tt.err != "" {
assert.Equal(t, 0, len(buffer))
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
}
}
})
}
func setBandwidth(ctx context.Context, t *testing.T, planet *testplanet.Planet, bandwidth int64) {

View File

@ -18,43 +18,39 @@ import (
)
func TestGetSignee(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
trust := planet.StorageNodes[0].Storage2.Trust
canceledContext, cancel := context.WithCancel(ctx)
cancel()
var group errgroup.Group
group.Go(func() error {
_, err := trust.GetSignee(canceledContext, planet.Satellites[0].ID())
if errs2.IsCanceled(err) {
return nil
}
// if the other goroutine races us,
// then we might get the certificate from the cache, however we shouldn't get an error
return err
})
group.Go(func() error {
cert, err := trust.GetSignee(ctx, planet.Satellites[0].ID())
if err != nil {
return err
}
if cert == nil {
return errors.New("didn't get certificate")
}
return nil
})
assert.NoError(t, group.Wait())
})
}
func TestGetAddress(t *testing.T) {

View File

@ -34,38 +34,33 @@ const (
)
func TestECClient(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: storageNodes, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
ec := ecclient.NewClient(planet.Uplinks[0].Log.Named("ecclient"), planet.Uplinks[0].Dialer, 0)
k := storageNodes / 2
n := storageNodes
fc, err := infectious.NewFEC(k, n)
require.NoError(t, err)
es := eestream.NewRSScheme(fc, dataSize.Int()/n)
rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
require.NoError(t, err)
data, err := ioutil.ReadAll(io.LimitReader(testrand.Reader(), dataSize.Int64()))
require.NoError(t, err)
// Erasure encode some random data and upload the pieces
successfulNodes, successfulHashes := testPut(ctx, t, planet, ec, rs, data)
// Download the pieces and erasure decode the data
testGet(ctx, t, planet, ec, es, data, successfulNodes, successfulHashes)
// Delete the pieces
testDelete(ctx, t, planet, ec, successfulNodes, successfulHashes)
})
}
func testPut(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, rs eestream.RedundancyStrategy, data []byte) ([]*pb.Node, []*pb.PieceHash) {