replace planet.Start in tests with planet.Run

planet.Start starts a testplanet system, whereas planet.Run starts a testplanet
system and runs a test against it with each DB backend (for CockroachDB compatibility).

Change-Id: I39c9da26d9619ee69a2b718d24ab00271f9e9bc2
Cameron Ayer 2019-12-06 13:03:22 -05:00 committed by Cameron
parent 8cf1aa6e4f
commit 6fae361c31
11 changed files with 935 additions and 1062 deletions
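
The conversion applied in every file below follows one pattern: the hand-rolled testcontext/testplanet setup and the planet.Start call are deleted, and the test body is wrapped in testplanet.Run, which owns the planet lifecycle and re-runs the body against each supported satellite DB backend. A minimal before/after sketch of that pattern (the TestExample name and placeholder body are illustrative only, not part of this commit; the Config fields mirror the ones used in the diffs below):

// Before: the test wires up, starts, and shuts down the planet itself,
// so it only ever runs against a single database backend.
func TestExample(t *testing.T) {
    ctx := testcontext.New(t)
    defer ctx.Cleanup()

    planet, err := testplanet.New(t, 1, 5, 1) // satellites, storage nodes, uplinks
    require.NoError(t, err)
    defer ctx.Check(planet.Shutdown)

    planet.Start(ctx)

    // ... test body using ctx and planet ...
}

// After: testplanet.Run creates, starts, and shuts down the planet,
// and runs the body once per supported database backend.
func TestExample(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        // ... test body using ctx and planet ...
    })
}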


@@ -11,33 +11,11 @@ import (
    "time"

    "github.com/stretchr/testify/require"
-     "go.uber.org/zap/zapcore"
-     "go.uber.org/zap/zaptest"

    "storj.io/storj/private/testcontext"
    "storj.io/storj/private/testplanet"
)

- func RunPlanet(t *testing.T, run func(ctx *testcontext.Context, planet *testplanet.Planet)) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.NewCustom(
-         zaptest.NewLogger(t, zaptest.Level(zapcore.WarnLevel)),
-         testplanet.Config{
-             SatelliteCount:   1,
-             StorageNodeCount: 5,
-             UplinkCount:      1,
-             Reconfigure:      testplanet.DisablePeerCAWhitelist,
-         },
-     )
-     require.NoError(t, err)
-
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
-
-     run(ctx, planet)
- }
-
func TestC(t *testing.T) {
    ctx := testcontext.NewWithTimeout(t, 5*time.Minute)
    defer ctx.Cleanup()
@@ -71,7 +49,10 @@ func TestC(t *testing.T) {
        },
    })

-     RunPlanet(t, func(ctx *testcontext.Context, planet *testplanet.Planet) {
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
+         Reconfigure: testplanet.DisablePeerCAWhitelist,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        cmd := exec.Command(testexe)
        cmd.Dir = filepath.Dir(testexe)
        cmd.Env = append(os.Environ(),


@@ -122,29 +122,24 @@ func TestNewOptions(t *testing.T) {
}

func TestOptions_ServerOption_Peer_CA_Whitelist(t *testing.T) {
-     ctx := testcontext.New(t)
-
-     planet, err := testplanet.New(t, 0, 2, 0)
-     require.NoError(t, err)
-     planet.Start(ctx)
-     defer ctx.Check(planet.Shutdown)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 0, StorageNodeCount: 2, UplinkCount: 0,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        target := planet.StorageNodes[1].Local()

        testidentity.CompleteIdentityVersionsTest(t, func(t *testing.T, version storj.IDVersion, ident *identity.FullIdentity) {
            tlsOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{
                PeerIDVersions: "*",
            }, nil)
            require.NoError(t, err)

            dialer := rpc.NewDefaultDialer(tlsOptions)

            conn, err := dialer.DialNode(ctx, &target.Node)
            assert.NotNil(t, conn)
            assert.NoError(t, err)
            assert.NoError(t, conn.Close())
        })
+     })
}


@@ -11,7 +11,6 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
-     "go.uber.org/zap/zaptest"

    "storj.io/storj/pkg/pb"
    "storj.io/storj/pkg/peertls/tlsopts"
@@ -39,200 +38,180 @@ func TestRPCBuild(t *testing.T) {
}

func TestDialNode(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 0, 2, 0)
-     if err != nil {
-         t.Fatal(err)
-     }
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 0, StorageNodeCount: 2, UplinkCount: 0,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        whitelistPath, err := planet.WriteWhitelist(storj.LatestIDVersion())
        require.NoError(t, err)

        unsignedIdent, err := testidentity.PregeneratedIdentity(0, storj.LatestIDVersion())
        require.NoError(t, err)

        signedIdent, err := testidentity.PregeneratedSignedIdentity(0, storj.LatestIDVersion())
        require.NoError(t, err)

        tlsOptions, err := tlsopts.NewOptions(signedIdent, tlsopts.Config{
            UsePeerCAWhitelist:  true,
            PeerCAWhitelistPath: whitelistPath,
            PeerIDVersions:      "*",
        }, nil)
        require.NoError(t, err)

        dialer := rpc.NewDefaultDialer(tlsOptions)

        unsignedClientOpts, err := tlsopts.NewOptions(unsignedIdent, tlsopts.Config{
            PeerIDVersions: "*",
        }, nil)
        require.NoError(t, err)

        unsignedDialer := rpc.NewDefaultDialer(unsignedClientOpts)

        t.Run("DialNode with invalid targets", func(t *testing.T) {
            targets := []*pb.Node{
                {
                    Id:      storj.NodeID{},
                    Address: nil,
                },
                {
                    Id: storj.NodeID{},
                    Address: &pb.NodeAddress{
                        Transport: pb.NodeTransport_TCP_TLS_GRPC,
                    },
                },
                {
                    Id: storj.NodeID{123},
                    Address: &pb.NodeAddress{
                        Transport: pb.NodeTransport_TCP_TLS_GRPC,
                        Address:   "127.0.0.1:100",
                    },
                },
                {
                    Id: storj.NodeID{},
                    Address: &pb.NodeAddress{
                        Transport: pb.NodeTransport_TCP_TLS_GRPC,
                        Address:   planet.StorageNodes[1].Addr(),
                    },
                },
            }

            for _, target := range targets {
                tag := fmt.Sprintf("%+v", target)

                timedCtx, cancel := context.WithTimeout(ctx, time.Second)
                conn, err := dialer.DialNode(timedCtx, target)
                cancel()
                assert.Error(t, err, tag)
                assert.Nil(t, conn, tag)
            }
        })

        t.Run("DialNode with valid signed target", func(t *testing.T) {
            target := &pb.Node{
                Id: planet.StorageNodes[1].ID(),
                Address: &pb.NodeAddress{
                    Transport: pb.NodeTransport_TCP_TLS_GRPC,
                    Address:   planet.StorageNodes[1].Addr(),
                },
            }

            timedCtx, cancel := context.WithTimeout(ctx, time.Second)
            conn, err := dialer.DialNode(timedCtx, target)
            cancel()

            assert.NoError(t, err)
            require.NotNil(t, conn)

            assert.NoError(t, conn.Close())
        })

        t.Run("DialNode with unsigned identity", func(t *testing.T) {
            target := &pb.Node{
                Id: planet.StorageNodes[1].ID(),
                Address: &pb.NodeAddress{
                    Transport: pb.NodeTransport_TCP_TLS_GRPC,
                    Address:   planet.StorageNodes[1].Addr(),
                },
            }

            timedCtx, cancel := context.WithTimeout(ctx, time.Second)
            conn, err := unsignedDialer.DialNode(timedCtx, target)
            cancel()

            assert.NotNil(t, conn)
            require.NoError(t, err)
            assert.NoError(t, conn.Close())
        })

        t.Run("DialAddress with unsigned identity", func(t *testing.T) {
            timedCtx, cancel := context.WithTimeout(ctx, time.Second)
            conn, err := unsignedDialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
            cancel()

            assert.NotNil(t, conn)
            require.NoError(t, err)
            assert.NoError(t, conn.Close())
        })

        t.Run("DialAddress with valid address", func(t *testing.T) {
            timedCtx, cancel := context.WithTimeout(ctx, time.Second)
            conn, err := dialer.DialAddressInsecure(timedCtx, planet.StorageNodes[1].Addr())
            cancel()

            assert.NoError(t, err)
            require.NotNil(t, conn)
            assert.NoError(t, conn.Close())
        })
+     })
}

func TestDialNode_BadServerCertificate(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.NewCustom(
-         zaptest.NewLogger(t),
-         testplanet.Config{
-             SatelliteCount:   0,
-             StorageNodeCount: 2,
-             UplinkCount:      0,
-             Reconfigure:      testplanet.DisablePeerCAWhitelist,
-             Identities:       testidentity.NewPregeneratedIdentities(storj.LatestIDVersion()),
-         },
-     )
-     if err != nil {
-         t.Fatal(err)
-     }
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 0, StorageNodeCount: 2, UplinkCount: 0,
+         Reconfigure: testplanet.DisablePeerCAWhitelist,
+         Identities:  testidentity.NewPregeneratedIdentities(storj.LatestIDVersion()),
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        whitelistPath, err := planet.WriteWhitelist(storj.LatestIDVersion())
        require.NoError(t, err)

        ident, err := testidentity.PregeneratedSignedIdentity(0, storj.LatestIDVersion())
        require.NoError(t, err)

        tlsOptions, err := tlsopts.NewOptions(ident, tlsopts.Config{
            UsePeerCAWhitelist:  true,
            PeerCAWhitelistPath: whitelistPath,
        }, nil)
        require.NoError(t, err)

        dialer := rpc.NewDefaultDialer(tlsOptions)

        t.Run("DialNode with bad server certificate", func(t *testing.T) {
            target := &pb.Node{
                Id: planet.StorageNodes[1].ID(),
                Address: &pb.NodeAddress{
                    Transport: pb.NodeTransport_TCP_TLS_GRPC,
                    Address:   planet.StorageNodes[1].Addr(),
                },
            }

            timedCtx, cancel := context.WithTimeout(ctx, time.Second)
            conn, err := dialer.DialNode(timedCtx, target)
            cancel()

            tag := fmt.Sprintf("%+v", target)
            assert.Nil(t, conn, tag)
            require.Error(t, err, tag)
            assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
        })

        t.Run("DialAddress with bad server certificate", func(t *testing.T) {
            timedCtx, cancel := context.WithTimeout(ctx, time.Second)
            conn, err := dialer.DialAddressID(timedCtx, planet.StorageNodes[1].Addr(), planet.StorageNodes[1].ID())
            cancel()

            assert.Nil(t, conn)
            require.Error(t, err)
            assert.Contains(t, err.Error(), "not signed by any CA in the whitelist")
        })
+     })
}


@@ -18,48 +18,41 @@ import (
)

func TestBasic(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     test := func(version storj.IDVersion) {
-         planet, err := testplanet.NewWithIdentityVersion(t, &version, 2, 4, 1)
-         require.NoError(t, err)
-         defer ctx.Check(planet.Shutdown)
-
-         planet.Start(ctx)
-
    for _, version := range storj.IDVersions {
-         test(version)
+         version := version
+         testplanet.Run(t, testplanet.Config{
+             SatelliteCount: 2, StorageNodeCount: 4, UplinkCount: 1,
+             IdentityVersion: &version,
+         }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
            for _, satellite := range planet.Satellites {
                t.Log("SATELLITE", satellite.ID(), satellite.Addr())
            }
            for _, storageNode := range planet.StorageNodes {
                t.Log("STORAGE", storageNode.ID(), storageNode.Addr())
            }
            for _, uplink := range planet.Uplinks {
                t.Log("UPLINK", uplink.ID(), uplink.Addr())
            }

            for _, sat := range planet.Satellites {
                satellite := sat.Local().Node
                for _, sn := range planet.StorageNodes {
                    node := sn.Local()
                    conn, err := sn.Dialer.DialNode(ctx, &satellite)
                    require.NoError(t, err)
                    defer ctx.Check(conn.Close)
                    _, err = conn.NodeClient().CheckIn(ctx, &pb.CheckInRequest{
                        Address:  node.GetAddress().GetAddress(),
                        Version:  &node.Version,
                        Capacity: &node.Capacity,
                        Operator: &node.Operator,
                    })
                    require.NoError(t, err)
                }
            }
            // wait a bit to see whether some failures occur
            time.Sleep(time.Second)
+         })
    }
}


@@ -34,159 +34,149 @@ import (
)

func TestInvalidAPIKey(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 0, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        throwawayKey, err := macaroon.NewAPIKey([]byte("secret"))
        require.NoError(t, err)

        for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
            client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
            require.NoError(t, err)
            defer ctx.Check(client.Close)

            client.SetRawAPIKey([]byte(invalidAPIKey))

            _, _, _, err = client.CreateSegmentOld(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
            assertUnauthenticated(t, err, false)

            _, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
            assertUnauthenticated(t, err, false)

            _, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
            assertUnauthenticated(t, err, false)

            _, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
            assertUnauthenticated(t, err, false)

            _, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
            assertUnauthenticated(t, err, false)

            _, _, err = client.ListSegmentsOld(ctx, "testbucket", "", "", "", true, 1, 0)
            assertUnauthenticated(t, err, false)
        }
+     })
}

func TestRestrictedAPIKey(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 0, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        key := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

        tests := []struct {
            Caveat               macaroon.Caveat
            CreateSegmentAllowed bool
            CommitSegmentAllowed bool
            SegmentInfoAllowed   bool
            ReadSegmentAllowed   bool
            DeleteSegmentAllowed bool
            ListSegmentsAllowed  bool
            ReadBucketAllowed    bool
        }{
            { // Everything disallowed
                Caveat: macaroon.Caveat{
                    DisallowReads:   true,
                    DisallowWrites:  true,
                    DisallowLists:   true,
                    DisallowDeletes: true,
                },
                ReadBucketAllowed: true,
            },

            { // Read only
                Caveat: macaroon.Caveat{
                    DisallowWrites:  true,
                    DisallowDeletes: true,
                },
                SegmentInfoAllowed:  true,
                ReadSegmentAllowed:  true,
                ListSegmentsAllowed: true,
                ReadBucketAllowed:   true,
            },

            { // Write only
                Caveat: macaroon.Caveat{
                    DisallowReads: true,
                    DisallowLists: true,
                },
                CreateSegmentAllowed: true,
                CommitSegmentAllowed: true,
                DeleteSegmentAllowed: true,
                ReadBucketAllowed:    true,
            },

            { // Bucket restriction
                Caveat: macaroon.Caveat{
                    AllowedPaths: []*macaroon.Caveat_Path{{
                        Bucket: []byte("otherbucket"),
                    }},
                },
            },

            { // Path restriction
                Caveat: macaroon.Caveat{
                    AllowedPaths: []*macaroon.Caveat_Path{{
                        Bucket:              []byte("testbucket"),
                        EncryptedPathPrefix: []byte("otherpath"),
                    }},
                },
                ReadBucketAllowed: true,
            },

            { // Time restriction after
                Caveat: macaroon.Caveat{
                    NotAfter: func(x time.Time) *time.Time { return &x }(time.Now()),
                },
            },

            { // Time restriction before
                Caveat: macaroon.Caveat{
                    NotBefore: func(x time.Time) *time.Time { return &x }(time.Now().Add(time.Hour)),
                },
            },
        }

        for _, test := range tests {
            restrictedKey, err := key.Restrict(test.Caveat)
            require.NoError(t, err)

            client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey)
            require.NoError(t, err)
            defer ctx.Check(client.Close)

            _, _, _, err = client.CreateSegmentOld(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
            assertUnauthenticated(t, err, test.CreateSegmentAllowed)

            _, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
            assertUnauthenticated(t, err, test.CommitSegmentAllowed)

            _, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
            assertUnauthenticated(t, err, test.SegmentInfoAllowed)

            _, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
            assertUnauthenticated(t, err, test.ReadSegmentAllowed)

            _, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
            assertUnauthenticated(t, err, test.DeleteSegmentAllowed)

            _, _, err = client.ListSegmentsOld(ctx, "testbucket", "testpath", "", "", true, 1, 0)
            assertUnauthenticated(t, err, test.ListSegmentsAllowed)

            _, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "", -1)
            assertUnauthenticated(t, err, test.ReadBucketAllowed)
        }
+     })
}

func assertUnauthenticated(t *testing.T, err error, allowed bool) {
@@ -200,79 +190,75 @@ func assertUnauthenticated(t *testing.T, err error, allowed bool) {
}

func TestServiceList(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 0, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        items := []struct {
            Key   string
            Value []byte
        }{
            {Key: "sample.😶", Value: []byte{1}},
            {Key: "müsic", Value: []byte{2}},
            {Key: "müsic/söng1.mp3", Value: []byte{3}},
            {Key: "müsic/söng2.mp3", Value: []byte{4}},
            {Key: "müsic/album/söng3.mp3", Value: []byte{5}},
            {Key: "müsic/söng4.mp3", Value: []byte{6}},
            {Key: "ビデオ/movie.mkv", Value: []byte{7}},
        }

        for _, item := range items {
            err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", item.Key, item.Value)
            assert.NoError(t, err)
        }

        config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
        project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
        require.NoError(t, err)
        defer ctx.Check(bucket.Close)
        defer ctx.Check(project.Close)
        list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
        require.NoError(t, err)

        expected := []storj.Object{
            {Path: "müsic"},
            {Path: "müsic/album/söng3.mp3"},
            {Path: "müsic/söng1.mp3"},
            {Path: "müsic/söng2.mp3"},
            {Path: "müsic/söng4.mp3"},
            {Path: "sample.😶"},
            {Path: "ビデオ/movie.mkv"},
        }

        require.Equal(t, len(expected), len(list.Items))
        sort.Slice(list.Items, func(i, k int) bool {
            return list.Items[i].Path < list.Items[k].Path
        })
        for i, item := range expected {
            require.Equal(t, item.Path, list.Items[i].Path)
            require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
        }

        list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
        require.NoError(t, err)

        expected = []storj.Object{
            {Path: "müsic"},
            {Path: "müsic/", IsPrefix: true},
            {Path: "sample.😶"},
            {Path: "ビデオ/", IsPrefix: true},
        }

        require.Equal(t, len(expected), len(list.Items))
        sort.Slice(list.Items, func(i, k int) bool {
            return list.Items[i].Path < list.Items[k].Path
        })
        for i, item := range expected {
            t.Log(item.Path, list.Items[i].Path)
            require.Equal(t, item.Path, list.Items[i].Path)
            require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
        }
+     })
}

func TestCommitSegment(t *testing.T) {


@@ -15,89 +15,74 @@ import (
)

func TestGetNonExitingSatellites(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     totalSatelliteCount := 3
-     exitingSatelliteCount := 1
-     planet, err := testplanet.New(t, totalSatelliteCount, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 3, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+         totalSatelliteCount := len(planet.Satellites)
+         exitingSatelliteCount := 1
        exitingSatellite := planet.Satellites[0]
        storagenode := planet.StorageNodes[0]

        // set a satellite to already be exiting
        err := storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now().UTC(), 0)
        require.NoError(t, err)

        nonExitingSatellites, err := storagenode.GracefulExit.Endpoint.GetNonExitingSatellites(ctx, &pb.GetNonExitingSatellitesRequest{})
        require.NoError(t, err)
        require.Len(t, nonExitingSatellites.GetSatellites(), totalSatelliteCount-exitingSatelliteCount)

        for _, satellite := range nonExitingSatellites.GetSatellites() {
            require.NotEqual(t, exitingSatellite.ID(), satellite.NodeId)
        }
+     })
}

func TestInitiateGracefulExit(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     totalSatelliteCount := 3
-     planet, err := testplanet.New(t, totalSatelliteCount, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 3, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        storagenode := planet.StorageNodes[0]

        exitingSatelliteID := planet.Satellites[0].ID()

        req := &pb.InitiateGracefulExitRequest{
            NodeId: exitingSatelliteID,
        }

        resp, err := storagenode.GracefulExit.Endpoint.InitiateGracefulExit(ctx, req)
        require.NoError(t, err)
        // check progress is 0
        require.EqualValues(t, 0, resp.GetPercentComplete())
        require.False(t, resp.GetSuccessful())

        exitStatuses, err := storagenode.DB.Satellites().ListGracefulExits(ctx)
        require.NoError(t, err)
        require.Len(t, exitStatuses, 1)
        require.Equal(t, exitingSatelliteID, exitStatuses[0].SatelliteID)
+     })
}

func TestGetExitProgress(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     totalSatelliteCount := 3
-     planet, err := testplanet.New(t, totalSatelliteCount, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 3, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        exitingSatellite := planet.Satellites[0]
        storagenode := planet.StorageNodes[0]

        // start graceful exit
        err := storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now().UTC(), 100)
        require.NoError(t, err)
        err = storagenode.DB.Satellites().UpdateGracefulExit(ctx, exitingSatellite.ID(), 20)
        require.NoError(t, err)

        // check graceful exit progress
        resp, err := storagenode.GracefulExit.Endpoint.GetExitProgress(ctx, &pb.GetExitProgressRequest{})
        require.NoError(t, err)
        require.Len(t, resp.GetProgress(), 1)
        progress := resp.GetProgress()[0]
        require.Equal(t, progress.GetDomainName(), exitingSatellite.Addr())
        require.Equal(t, progress.NodeId, exitingSatellite.ID())
        require.EqualValues(t, 20, progress.GetPercentComplete())
        require.False(t, progress.GetSuccessful())
        require.Empty(t, progress.GetCompletionReceipt())
+     })
}


@@ -20,86 +20,81 @@ import (
)

func TestInspectorStats(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 5, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        var availableBandwidth int64
        var availableSpace int64
        for _, storageNode := range planet.StorageNodes {
            response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
            require.NoError(t, err)

            assert.Zero(t, response.UsedBandwidth)
            assert.Zero(t, response.UsedSpace)
            assert.Zero(t, response.UsedEgress)
            assert.Zero(t, response.UsedIngress)
            assert.True(t, response.AvailableBandwidth > 0)
            assert.True(t, response.AvailableSpace > 0)

            // assume that all storage node should have the same initial values
            availableBandwidth = response.AvailableBandwidth
            availableSpace = response.AvailableSpace
        }

        expectedData := testrand.Bytes(100 * memory.KiB)

        rs := &uplink.RSConfig{
            MinThreshold:     2,
            RepairThreshold:  3,
            SuccessThreshold: 4,
            MaxThreshold:     5,
        }

-     err = planet.Uplinks[0].UploadWithConfig(ctx, planet.Satellites[0], rs, "testbucket", "test/path", expectedData)
+         err := planet.Uplinks[0].UploadWithConfig(ctx, planet.Satellites[0], rs, "testbucket", "test/path", expectedData)
        require.NoError(t, err)

        _, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "test/path")
        assert.NoError(t, err)

        // wait until all requests have been handled
        for {
            total := int32(0)
            for _, storageNode := range planet.StorageNodes {
                total += storageNode.Storage2.Endpoint.TestLiveRequestCount()
            }
            if total == 0 {
                break
            }

            sync2.Sleep(ctx, 100*time.Millisecond)
        }

        var downloaded int
        for _, storageNode := range planet.StorageNodes {
            response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
            require.NoError(t, err)

            // TODO set more accurate assertions
            if response.UsedSpace > 0 {
                assert.NotZero(t, response.UsedBandwidth)
                assert.Equal(t, response.UsedBandwidth, response.UsedIngress+response.UsedEgress)
                assert.Equal(t, availableBandwidth-response.UsedBandwidth, response.AvailableBandwidth)
                assert.Equal(t, availableSpace-response.UsedSpace, response.AvailableSpace)

                assert.Equal(t, response.UsedSpace, response.UsedBandwidth-response.UsedEgress)
                if response.UsedEgress > 0 {
                    downloaded++
                    assert.Equal(t, response.UsedBandwidth-response.UsedIngress, response.UsedEgress)
                }
            } else {
                assert.Zero(t, response.UsedSpace)
                // TODO track why this is failing
                //assert.Equal(t, availableBandwidth, response.AvailableBandwidth)
                assert.Equal(t, availableSpace, response.AvailableSpace)
            }
        }
        assert.True(t, downloaded >= rs.MinThreshold, "downloaded=%v, rs.MinThreshold=%v", downloaded, rs.MinThreshold)
+     })
}

func TestInspectorDashboard(t *testing.T) {


@@ -90,352 +90,328 @@ func TestUploadAndPartialDownload(t *testing.T) {
}

func TestUpload(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
        require.NoError(t, err)
        defer ctx.Check(client.Close)

        for _, tt := range []struct {
            pieceID       storj.PieceID
            contentLength memory.Size
            action        pb.PieceAction
            err           string
        }{
            { // should successfully store data
                pieceID:       storj.PieceID{1},
                contentLength: 50 * memory.KiB,
                action:        pb.PieceAction_PUT,
                err:           "",
            },
            { // should err with piece ID not specified
                pieceID:       storj.PieceID{},
                contentLength: 1 * memory.KiB,
                action:        pb.PieceAction_PUT,
                err:           "missing piece id",
            },
            { // should err because invalid action
                pieceID:       storj.PieceID{1},
                contentLength: 1 * memory.KiB,
                action:        pb.PieceAction_GET,
                err:           "expected put or put repair action got GET",
            },
        } {
            data := testrand.Bytes(tt.contentLength)
            serialNumber := testrand.SerialNumber()

            orderLimit, piecePrivateKey := GenerateOrderLimit(
                t,
                planet.Satellites[0].ID(),
                planet.StorageNodes[0].ID(),
                tt.pieceID,
                tt.action,
                serialNumber,
                24*time.Hour,
                24*time.Hour,
                int64(len(data)),
            )
            signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
            orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
            require.NoError(t, err)

            uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
            require.NoError(t, err)

            _, err = uploader.Write(data)
            require.NoError(t, err)

            pieceHash, err := uploader.Commit(ctx)
            if tt.err != "" {
                require.Error(t, err)
                require.Contains(t, err.Error(), tt.err)
            } else {
                require.NoError(t, err)

                expectedHash := pkcrypto.SHA256Hash(data)
                assert.Equal(t, expectedHash, pieceHash.Hash)

                signee := signing.SignerFromFullIdentity(planet.StorageNodes[0].Identity)
                require.NoError(t, signing.VerifyPieceHashSignature(ctx, signee, pieceHash))
            }
        }
+     })
}

func TestDownload(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        pieceID := storj.PieceID{1}
        expectedData, _, _ := uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
        client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
        require.NoError(t, err)

        for _, tt := range []struct {
            pieceID storj.PieceID
            action  pb.PieceAction
            errs    []string
        }{
            { // should successfully download data
                pieceID: pieceID,
                action:  pb.PieceAction_GET,
            },
            { // should err with piece ID not specified
                pieceID: storj.PieceID{},
                action:  pb.PieceAction_GET,
                errs:    []string{"missing piece id"},
            },
            { // should err with piece ID not specified
                pieceID: storj.PieceID{2},
                action:  pb.PieceAction_GET,
                errs:    []string{"file does not exist", "The system cannot find the path specified"},
            },
            { // should err with invalid action
                pieceID: pieceID,
                action:  pb.PieceAction_PUT,
                errs:    []string{"expected get or get repair or audit action got PUT"},
            },
        } {
            serialNumber := testrand.SerialNumber()

            orderLimit, piecePrivateKey := GenerateOrderLimit(
                t,
                planet.Satellites[0].ID(),
                planet.StorageNodes[0].ID(),
                tt.pieceID,
                tt.action,
                serialNumber,
                24*time.Hour,
                24*time.Hour,
                int64(len(expectedData)),
            )
            signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
            orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
            require.NoError(t, err)

            downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, int64(len(expectedData)))
            require.NoError(t, err)

            buffer := make([]byte, len(expectedData))
            n, err := downloader.Read(buffer)

            if len(tt.errs) > 0 {
            } else {
                require.NoError(t, err)
                require.Equal(t, expectedData, buffer[:n])
            }

            err = downloader.Close()
            if len(tt.errs) > 0 {
                require.Error(t, err)
                require.True(t, strings.Contains(err.Error(), tt.errs[0]) || strings.Contains(err.Error(), tt.errs[1]), err.Error())
            } else {
                require.NoError(t, err)
            }

            // these should only be not-nil if action = pb.PieceAction_GET_REPAIR
            hash, originalLimit := downloader.GetHashAndLimit()
            require.Nil(t, hash)
            require.Nil(t, originalLimit)
        }
+     })
}

func TestDownloadGetRepair(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        pieceID := storj.PieceID{1}
        expectedData, ulOrderLimit, originHash := uploadPiece(
            t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0],
        )
        client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
        require.NoError(t, err)

        serialNumber := testrand.SerialNumber()

        dlOrderLimit, piecePrivateKey := GenerateOrderLimit(
            t,
            planet.Satellites[0].ID(),
            planet.StorageNodes[0].ID(),
            storj.PieceID{1},
            pb.PieceAction_GET_REPAIR,
            serialNumber,
            24*time.Hour,
            24*time.Hour,
            int64(len(expectedData)),
        )
        signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
        dlOrderLimit, err = signing.SignOrderLimit(ctx, signer, dlOrderLimit)
        require.NoError(t, err)

        downloader, err := client.Download(ctx, dlOrderLimit, piecePrivateKey, 0, int64(len(expectedData)))
        require.NoError(t, err)

        buffer := make([]byte, len(expectedData))
        n, err := downloader.Read(buffer)
        require.NoError(t, err)
        require.Equal(t, expectedData, buffer[:n])

        err = downloader.Close()
        require.NoError(t, err)

        hash, originLimit := downloader.GetHashAndLimit()
        require.NotNil(t, hash)
        require.Equal(t, originHash.Hash, hash.Hash)
        require.Equal(t, originHash.PieceId, hash.PieceId)

        require.NotNil(t, originLimit)
        require.Equal(t, originLimit.Action, ulOrderLimit.Action)
        require.Equal(t, originLimit.Limit, ulOrderLimit.Limit)
        require.Equal(t, originLimit.PieceId, ulOrderLimit.PieceId)
        require.Equal(t, originLimit.SatelliteId, ulOrderLimit.SatelliteId)
        require.Equal(t, originLimit.SerialNumber, ulOrderLimit.SerialNumber)
        require.Equal(t, originLimit.SatelliteSignature, ulOrderLimit.SatelliteSignature)
        require.Equal(t, originLimit.UplinkPublicKey, ulOrderLimit.UplinkPublicKey)
+     })
}

func TestDelete(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        pieceID := storj.PieceID{1}
        uploadPiece(t, ctx, pieceID, planet.StorageNodes[0], planet.Uplinks[0], planet.Satellites[0])
        client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
        require.NoError(t, err)

        for _, tt := range []struct {
            pieceID storj.PieceID
            action  pb.PieceAction
            err     string
        }{
            { // should successfully delete data
                pieceID: pieceID,
                action:  pb.PieceAction_DELETE,
                err:     "",
            },
            { // should err with piece ID not found
                pieceID: storj.PieceID{99},
                action:  pb.PieceAction_DELETE,
                err:     "", // TODO should this return error
            },
            { // should err with piece ID not specified
                pieceID: storj.PieceID{},
                action:  pb.PieceAction_DELETE,
                err:     "missing piece id",
            },
            { // should err due to incorrect action
                pieceID: pieceID,
                action:  pb.PieceAction_GET,
                err:     "expected delete action got GET",
            },
        } {
            serialNumber := testrand.SerialNumber()

            orderLimit, piecePrivateKey := GenerateOrderLimit(
                t,
                planet.Satellites[0].ID(),
                planet.StorageNodes[0].ID(),
                tt.pieceID,
                tt.action,
                serialNumber,
                24*time.Hour,
                24*time.Hour,
                100,
            )
            signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
            orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
            require.NoError(t, err)

            err := client.Delete(ctx, orderLimit, piecePrivateKey)
            if tt.err != "" {
                require.Error(t, err)
                require.Contains(t, err.Error(), tt.err)
            } else {
                require.NoError(t, err)
            }
        }
+     })
}

func TestDeletePiece(t *testing.T) {
-     ctx := testcontext.New(t)
-     defer ctx.Cleanup()
-
-     planet, err := testplanet.New(t, 1, 1, 1)
-     require.NoError(t, err)
-     defer ctx.Check(planet.Shutdown)
-     planet.Start(ctx)
+     testplanet.Run(t, testplanet.Config{
+         SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        var (
            planetSat = planet.Satellites[0]
            planetSN  = planet.StorageNodes[0]
        )

        var client *piecestore.Client
        {
            dossier, err := planetSat.Overlay.DB.Get(ctx.Context, planetSN.ID())
            require.NoError(t, err)

            client, err = piecestore.Dial(
                ctx.Context, planetSat.Dialer, &dossier.Node, zaptest.NewLogger(t), piecestore.Config{},
            )
            require.NoError(t, err)
        }

        t.Run("Ok", func(t *testing.T) {
            pieceID := storj.PieceID{1}
            data, _, _ := uploadPiece(t, ctx, pieceID, planetSN, planet.Uplinks[0], planetSat)

            err := client.DeletePiece(ctx.Context, pieceID)
            require.NoError(t, err)

            _, err = downloadPiece(t, ctx, pieceID, int64(len(data)), planetSN, planet.Uplinks[0], planetSat)
            require.Error(t, err)

            require.Condition(t, func() bool {
                return strings.Contains(err.Error(), "file does not exist") ||
                    strings.Contains(err.Error(), "The system cannot find the path specified")
            }, "unexpected error message")
        })

        t.Run("error: Not found", func(t *testing.T) {
            err := client.DeletePiece(ctx.Context, storj.PieceID{2})
            require.Error(t, err)
            require.Equal(t, rpcstatus.NotFound, rpcstatus.Code(err))
        })

        t.Run("error: permission denied", func(t *testing.T) {
            client, err := planet.Uplinks[0].DialPiecestore(ctx, planetSN)
            require.NoError(t, err)

            err = client.DeletePiece(ctx.Context, storj.PieceID{})
            require.Error(t, err)
            require.Equal(t, rpcstatus.PermissionDenied, rpcstatus.Code(err))
        })
+     })
}

View File

@ -124,31 +124,147 @@ func TestOrderLimitPutValidation(t *testing.T) {
} {
tt := tt
t.Run(tt.testName, func(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// set desirable bandwidth
setBandwidth(ctx, t, planet, tt.availableBandwidth)
// set desirable space
setSpace(ctx, t, planet, tt.availableSpace)
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
if tt.useUnknownSatellite {
unapprovedSatellite, err := planet.NewIdentity()
require.NoError(t, err)
signer = signing.SignerFromFullIdentity(unapprovedSatellite)
satellite = unapprovedSatellite
}
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
tt.serialNumber,
tt.pieceExpiration,
tt.orderExpiration,
tt.limit,
)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
var writeErr error
buffer := make([]byte, memory.KiB)
for i := 0; i < 10; i++ {
testrand.Read(buffer)
_, writeErr = uploader.Write(buffer)
if writeErr != nil {
break
}
}
_, commitErr := uploader.Commit(ctx)
err = errs.Combine(writeErr, commitErr)
if tt.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
}
})
})
}
}
func TestOrderLimitGetValidation(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
defaultPieceSize := 10 * memory.KiB
for _, storageNode := range planet.StorageNodes {
err := storageNode.DB.Bandwidth().Add(ctx, planet.Satellites[0].ID(), pb.PieceAction_GET, memory.TB.Int64()-(15*memory.KiB.Int64()), time.Now())
require.NoError(t, err)
}

{ // upload test piece
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)

signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity

orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_PUT,
storj.SerialNumber{0},
oneWeek,
oneWeek,
defaultPieceSize.Int64(),
)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
data := testrand.Bytes(defaultPieceSize)
_, err = uploader.Write(data)
require.NoError(t, err)
_, err = uploader.Commit(ctx)
require.NoError(t, err)
}
// wait for all requests to finish to ensure that the upload usage has been
// accounted for.
waitForEndpointRequestsToDrain(t, planet)
for _, tt := range []struct {
satellite *identity.FullIdentity
pieceID storj.PieceID
action pb.PieceAction
serialNumber storj.SerialNumber
pieceExpiration time.Duration
orderExpiration time.Duration
limit int64
err string
}{
{ // allocated bandwidth limit
pieceID: storj.PieceID{1},
action: pb.PieceAction_GET,
serialNumber: storj.SerialNumber{1},
pieceExpiration: oneWeek,
orderExpiration: oneWeek,
limit: 10 * memory.KiB.Int64(),
err: "out of bandwidth",
},
} {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
if tt.satellite != nil {
signer = signing.SignerFromFullIdentity(tt.satellite)
satellite = tt.satellite
}

orderLimit, piecePrivateKey := GenerateOrderLimit(
@ -166,145 +282,21 @@ func TestOrderLimitPutValidation(t *testing.T) {
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)

downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, tt.limit)
require.NoError(t, err)

buffer, readErr := ioutil.ReadAll(downloader)
closeErr := downloader.Close()
err = errs.Combine(readErr, closeErr)
if tt.err != "" {
assert.Equal(t, 0, len(buffer))
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
}
}
})
}
func TestOrderLimitGetValidation(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 1, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
defaultPieceSize := 10 * memory.KiB
for _, storageNode := range planet.StorageNodes {
err = storageNode.DB.Bandwidth().Add(ctx, planet.Satellites[0].ID(), pb.PieceAction_GET, memory.TB.Int64()-(15*memory.KiB.Int64()), time.Now())
require.NoError(t, err)
}
{ // upload test piece
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.StorageNodes[0].ID(),
storj.PieceID{1},
pb.PieceAction_PUT,
storj.SerialNumber{0},
oneWeek,
oneWeek,
defaultPieceSize.Int64(),
)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
uploader, err := client.Upload(ctx, orderLimit, piecePrivateKey)
require.NoError(t, err)
data := testrand.Bytes(defaultPieceSize)
_, err = uploader.Write(data)
require.NoError(t, err)
_, err = uploader.Commit(ctx)
require.NoError(t, err)
}
// wait for all requests to finish to ensure that the upload usage has been
// accounted for.
waitForEndpointRequestsToDrain(t, planet)
for _, tt := range []struct {
satellite *identity.FullIdentity
pieceID storj.PieceID
action pb.PieceAction
serialNumber storj.SerialNumber
pieceExpiration time.Duration
orderExpiration time.Duration
limit int64
err string
}{
{ // allocated bandwidth limit
pieceID: storj.PieceID{1},
action: pb.PieceAction_GET,
serialNumber: storj.SerialNumber{1},
pieceExpiration: oneWeek,
orderExpiration: oneWeek,
limit: 10 * memory.KiB.Int64(),
err: "out of bandwidth",
},
} {
client, err := planet.Uplinks[0].DialPiecestore(ctx, planet.StorageNodes[0])
require.NoError(t, err)
defer ctx.Check(client.Close)
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satellite := planet.Satellites[0].Identity
if tt.satellite != nil {
signer = signing.SignerFromFullIdentity(tt.satellite)
satellite = tt.satellite
}
orderLimit, piecePrivateKey := GenerateOrderLimit(
t,
satellite.ID,
planet.StorageNodes[0].ID(),
tt.pieceID,
tt.action,
tt.serialNumber,
tt.pieceExpiration,
tt.orderExpiration,
tt.limit,
)
orderLimit, err = signing.SignOrderLimit(ctx, signer, orderLimit)
require.NoError(t, err)
downloader, err := client.Download(ctx, orderLimit, piecePrivateKey, 0, tt.limit)
require.NoError(t, err)
buffer, readErr := ioutil.ReadAll(downloader)
closeErr := downloader.Close()
err = errs.Combine(readErr, closeErr)
if tt.err != "" {
assert.Equal(t, 0, len(buffer))
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)
}
}
}
func setBandwidth(ctx context.Context, t *testing.T, planet *testplanet.Planet, bandwidth int64) {

View File

@ -18,43 +18,39 @@ import (
)

func TestGetSignee(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
trust := planet.StorageNodes[0].Storage2.Trust

canceledContext, cancel := context.WithCancel(ctx)
cancel()

var group errgroup.Group
group.Go(func() error {
_, err := trust.GetSignee(canceledContext, planet.Satellites[0].ID())
if errs2.IsCanceled(err) {
return nil
}
// if the other goroutine races us,
// then we might get the certificate from the cache, however we shouldn't get an error
return err
})
group.Go(func() error {
cert, err := trust.GetSignee(ctx, planet.Satellites[0].ID())
if err != nil {
return err
}
if cert == nil {
return errors.New("didn't get certificate")
}
return nil
})

assert.NoError(t, group.Wait())
})
}
func TestGetAddress(t *testing.T) {

View File

@ -34,38 +34,33 @@ const (
)

func TestECClient(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: storageNodes, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
ec := ecclient.NewClient(planet.Uplinks[0].Log.Named("ecclient"), planet.Uplinks[0].Dialer, 0)

k := storageNodes / 2
n := storageNodes
fc, err := infectious.NewFEC(k, n)
require.NoError(t, err)

es := eestream.NewRSScheme(fc, dataSize.Int()/n)
rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
require.NoError(t, err)

data, err := ioutil.ReadAll(io.LimitReader(testrand.Reader(), dataSize.Int64()))
require.NoError(t, err)

// Erasure encode some random data and upload the pieces
successfulNodes, successfulHashes := testPut(ctx, t, planet, ec, rs, data)

// Download the pieces and erasure decode the data
testGet(ctx, t, planet, ec, es, data, successfulNodes, successfulHashes)

// Delete the pieces
testDelete(ctx, t, planet, ec, successfulNodes, successfulHashes)
})
}
func testPut(ctx context.Context, t *testing.T, planet *testplanet.Planet, ec ecclient.Client, rs eestream.RedundancyStrategy, data []byte) ([]*pb.Node, []*pb.PieceHash) {