adds test for correct download with non-critical amount of nodes offline (#1574)
parent: 02e07c8c65
commit: 5b48a48a79
@@ -13,6 +13,9 @@ import (
	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/uplink"
)

func TestUploadDownload(t *testing.T) {
@@ -37,3 +40,72 @@ func TestUploadDownload(t *testing.T) {

	assert.Equal(t, expectedData, data)
}

func TestDownloadWithSomeNodesOffline(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

		// first, upload some remote data
		ul := planet.Uplinks[0]
		satellite := planet.Satellites[0]

		testData := make([]byte, 1*memory.MiB)
		_, err := rand.Read(testData)
		require.NoError(t, err)

		err = ul.UploadWithConfig(ctx, satellite, &uplink.RSConfig{
			MinThreshold:     2,
			RepairThreshold:  3,
			SuccessThreshold: 4,
			MaxThreshold:     5,
		}, "testbucket", "test/path", testData)
		require.NoError(t, err)

		// get a remote segment from pointerdb
		pdb := satellite.Metainfo.Service
		listResponse, _, err := pdb.List("", "", "", true, 0, 0)
		require.NoError(t, err)

		var path string
		var pointer *pb.Pointer
		for _, v := range listResponse {
			path = v.GetPath()
			pointer, err = pdb.Get(path)
			require.NoError(t, err)
			if pointer.GetType() == pb.Pointer_REMOTE {
				break
			}
		}

		// calculate how many storagenodes to kill
		redundancy := pointer.GetRemote().GetRedundancy()
		remotePieces := pointer.GetRemote().GetRemotePieces()
		minReq := redundancy.GetMinReq()
		numPieces := len(remotePieces)
		toKill := numPieces - int(minReq)

		nodesToKill := make(map[storj.NodeID]bool)
		for i, piece := range remotePieces {
			if i >= toKill {
				continue
			}
			nodesToKill[piece.NodeId] = true
		}

		for _, node := range planet.StorageNodes {
			if nodesToKill[node.ID()] {
				err = planet.StopPeer(node)
				require.NoError(t, err)

				err = satellite.Overlay.Service.Delete(ctx, node.ID())
				require.NoError(t, err)
			}
		}

		// we should be able to download data without any of the original nodes
		newData, err := ul.Download(ctx, satellite, "testbucket", "test/path")
		require.NoError(t, err)
		require.Equal(t, testData, newData)
	})
}
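Why the test can stop toKill = numPieces - minReq nodes and still download: with the Reed-Solomon settings used above (MinThreshold 2, MaxThreshold 5), any minReq pieces are sufficient to reconstruct the segment, so the remaining minReq online pieces are enough. A minimal sketch of that arithmetic; the helper below is hypothetical and not part of the commit:

package sketch

// maxOfflineNodes is a hypothetical helper (not in the commit) illustrating
// the arithmetic the test relies on: a segment whose erasure coding needs
// minReq pieces stays downloadable as long as at least minReq of its stored
// pieces remain online.
func maxOfflineNodes(storedPieces, minReq int) int {
	if storedPieces < minReq {
		return 0 // already unrecoverable; no node can be taken offline
	}
	return storedPieces - minReq
}

// With RSConfig{MinThreshold: 2, ..., MaxThreshold: 5} and all 5 pieces stored,
// maxOfflineNodes(5, 2) == 3, so the test may stop up to 3 of the 5 storage nodes.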
@@ -24,7 +24,7 @@ const (
var ErrEmptyNode = errs.New("empty node ID")

// ErrNodeNotFound is returned if a node does not exist in database
-var ErrNodeNotFound = errs.New("Node not found")
+var ErrNodeNotFound = errs.Class("Node not found")

// ErrBucketNotFound is returned if a bucket is unable to be found in the routing table
var ErrBucketNotFound = errs.New("Bucket not found")
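Switching ErrNodeNotFound from a sentinel error (errs.New) to an error class (errs.Class) lets callers attach per-call context when constructing the error and still recognize it later with Has, instead of comparing with ==. A rough sketch of the pattern, assuming the github.com/zeebo/errs package the repo uses (the lookup function here is illustrative):

package main

import (
	"fmt"

	"github.com/zeebo/errs"
)

// ErrNodeNotFound is an error class: every error it creates or wraps is
// recognizable via ErrNodeNotFound.Has, even with extra context attached.
var ErrNodeNotFound = errs.Class("Node not found")

// lookup is an illustrative function that fails with a contextualized
// instance of the class.
func lookup(id string) error {
	return ErrNodeNotFound.New("couldn't find nodeID: %s", id)
}

func main() {
	err := lookup("abc123")
	fmt.Println(ErrNodeNotFound.Has(err)) // true; an == comparison would report false here
}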
@@ -71,7 +71,7 @@ func testCache(ctx context.Context, t *testing.T, store overlay.DB) {

	invalid2, err := cache.Get(ctx, missingID)
	assert.Error(t, err)
-	assert.True(t, err == overlay.ErrNodeNotFound)
+	assert.True(t, overlay.ErrNodeNotFound.Has(err))
	assert.Nil(t, invalid2)

	// TODO: add erroring database test
@@ -130,7 +130,7 @@ func testCache(ctx context.Context, t *testing.T, store overlay.DB) {
	deleted, err := cache.Get(ctx, valid1ID)
	assert.Error(t, err)
	assert.Nil(t, deleted)
-	assert.True(t, err == overlay.ErrNodeNotFound)
+	assert.True(t, overlay.ErrNodeNotFound.Has(err))

	// Test idempotent delete / non existent key delete
	err = cache.Delete(ctx, valid1ID)
@@ -315,6 +315,7 @@ func (endpoint *Endpoint) createOrderLimitsForSegment(ctx context.Context, point
	pieceSize := eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy)
	expiration := pointer.ExpirationDate

+	var combinedErrs error
	var limits []*pb.AddressedOrderLimit
	for _, piece := range pointer.GetRemote().GetRemotePieces() {
		derivedPieceID := rootPieceID.Derive(piece.NodeId)
@@ -325,7 +326,9 @@ func (endpoint *Endpoint) createOrderLimitsForSegment(ctx context.Context, point

		node, err := endpoint.cache.Get(ctx, piece.NodeId)
		if err != nil {
-			return nil, err
+			endpoint.log.Error("error getting node from overlay cache", zap.Error(err))
+			combinedErrs = errs.Combine(combinedErrs, err)
+			continue
		}

		if node != nil {
@@ -336,8 +339,13 @@ func (endpoint *Endpoint) createOrderLimitsForSegment(ctx context.Context, point
			Limit:              orderLimit,
			StorageNodeAddress: node.Address,
		})

	}

+	if len(limits) < redundancy.RequiredCount() {
+		err = Error.New("not enough nodes available: got %d, required %d", len(limits), redundancy.RequiredCount())
+		return nil, errs.Combine(combinedErrs, err)
+	}
+
	return limits, nil
}
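This change to createOrderLimitsForSegment is what lets the new test pass: instead of failing on the first node that cannot be found in the overlay cache, the satellite logs the error, folds it into combinedErrs, skips that piece, and only fails if fewer than RequiredCount order limits could be built. A stripped-down sketch of that best-effort pattern, with placeholder names standing in for the real cache and order-limit types (nothing below is from the codebase):

package sketch

import "github.com/zeebo/errs"

// collectAtLeast gathers results best-effort: per-item failures are folded
// into a combined error instead of aborting, and the whole call fails only
// when fewer than `required` results could be produced.
func collectAtLeast(ids []string, required int, lookup func(string) (string, error)) ([]string, error) {
	var combinedErrs error
	var results []string
	for _, id := range ids {
		r, err := lookup(id)
		if err != nil {
			combinedErrs = errs.Combine(combinedErrs, err)
			continue // skip this item, keep trying the rest
		}
		results = append(results, r)
	}
	if len(results) < required {
		err := errs.New("not enough results: got %d, required %d", len(results), required)
		return nil, errs.Combine(combinedErrs, err)
	}
	return results, nil
}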
@@ -116,7 +116,7 @@ func (cache *overlaycache) Get(ctx context.Context, id storj.NodeID) (*pb.Node,
		dbx.OverlayCacheNode_NodeId(id.Bytes()),
	)
	if err == sql.ErrNoRows {
-		return nil, overlay.ErrNodeNotFound
+		return nil, overlay.ErrNodeNotFound.New("couldn't find nodeID: %s", id.String())
	}
	if err != nil {
		return nil, err