fix calculation of durability ratio (#3656)

Ethan Adams 2019-11-26 12:04:48 -05:00 committed by GitHub
parent cf13cf6e00
commit bba92911cc
2 changed files with 104 additions and 1 deletion


@@ -8,9 +8,11 @@ import (
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/private/memory"
	"storj.io/storj/private/testcontext"
@@ -20,6 +22,7 @@ import (
	"storj.io/storj/satellite/gracefulexit"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
	"storj.io/storj/storage"
	"storj.io/storj/uplink"
)
@@ -126,6 +129,105 @@ func TestChore(t *testing.T) {
	})
}

func TestDurabilityRatio(t *testing.T) {
	var maximumInactiveTimeFrame = time.Second * 1
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 4,
		UplinkCount:      1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.GracefulExit.MaxInactiveTimeFrame = maximumInactiveTimeFrame
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		uplinkPeer := planet.Uplinks[0]
		satellite := planet.Satellites[0]
		nodeToRemove := planet.StorageNodes[0]
		exitingNode := planet.StorageNodes[1]
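		// pause the chore so the test controls when the graceful exit loop runs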
		satellite.GracefulExit.Chore.Loop.Pause()

		rs := &uplink.RSConfig{
			MinThreshold:     2,
			RepairThreshold:  3,
			SuccessThreshold: 4,
			MaxThreshold:     4,
		}
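		// upload a small segment; with SuccessThreshold 4 it is stored on all four nodes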
		err := uplinkPeer.UploadWithConfig(ctx, satellite, rs, "testbucket", "test/path1", testrand.Bytes(5*memory.KiB))
		require.NoError(t, err)
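		// mark exitingNode as having initiated graceful exit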
		exitStatusRequest := overlay.ExitStatusRequest{
			NodeID:          exitingNode.ID(),
			ExitInitiatedAt: time.Now().UTC(),
		}

		_, err = satellite.Overlay.DB.UpdateExitStatus(ctx, &exitStatusRequest)
		require.NoError(t, err)
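		// exactly one node should be exiting and not yet done with the metainfo loop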
		exitingNodes, err := satellite.Overlay.DB.GetExitingNodes(ctx)
		require.NoError(t, err)
		nodeIDs := make(storj.NodeIDList, 0, len(exitingNodes))
		for _, exitingNode := range exitingNodes {
			if exitingNode.ExitLoopCompletedAt == nil {
				nodeIDs = append(nodeIDs, exitingNode.NodeID)
			}
		}
		require.Len(t, nodeIDs, 1)

		// retrieve remote segment
		keys, err := satellite.Metainfo.Database.List(ctx, nil, -1)
		require.NoError(t, err)

		var oldPointer *pb.Pointer
		var path []byte
		for _, key := range keys {
			p, err := satellite.Metainfo.Service.Get(ctx, string(key))
			require.NoError(t, err)

			if p.GetRemote() != nil {
				oldPointer = p
				path = key
				break
			}
		}

		// remove a piece from the pointer
		require.NotNil(t, oldPointer)
		oldPointerBytes, err := proto.Marshal(oldPointer)
		require.NoError(t, err)
		newPointer := &pb.Pointer{}
		err = proto.Unmarshal(oldPointerBytes, newPointer)
		require.NoError(t, err)

		remotePieces := newPointer.GetRemote().GetRemotePieces()
		newPieces := make([]*pb.RemotePiece, 0, len(remotePieces)-1)
		for _, p := range remotePieces {
			if p.NodeId != nodeToRemove.ID() {
				newPieces = append(newPieces, p)
			}
		}
		newPointer.Remote.RemotePieces = newPieces
		newPointerBytes, err := proto.Marshal(newPointer)
		require.NoError(t, err)
		err = satellite.Metainfo.Database.CompareAndSwap(ctx, storage.Key(path), oldPointerBytes, newPointerBytes)
		require.NoError(t, err)
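		// trigger the graceful exit chore, which runs the metainfo loop and
		// enqueues transfer items with their computed durability ratios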
		satellite.GracefulExit.Chore.Loop.TriggerWait()

		incompleteTransfers, err := satellite.DB.GracefulExit().GetIncomplete(ctx, exitingNode.ID(), 20, 0)
		require.NoError(t, err)
		require.Len(t, incompleteTransfers, 1)
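		// with one of the four pieces removed, the ratio should be (4-1)/4 = 0.75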
		for _, incomplete := range incompleteTransfers {
			require.Equal(t, float64(rs.SuccessThreshold-1)/float64(rs.SuccessThreshold), incomplete.DurabilityRatio)
			require.NotNil(t, incomplete.RootPieceID)
		}
	})
}

func BenchmarkChore(b *testing.B) {
	satellitedbtest.Bench(b, func(b *testing.B, db satellite.DB) {
		gracefulexitdb := db.GracefulExit()


@@ -81,8 +81,9 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, path metainfo
			Path:            []byte(path.Raw),
			PieceNum:        piece.PieceNum,
			RootPieceID:     pointer.GetRemote().RootPieceId,
-			DurabilityRatio: float64(numPieces / pointer.GetRemote().GetRedundancy().GetTotal()),
+			DurabilityRatio: float64(numPieces) / float64(pointer.GetRemote().GetRedundancy().GetTotal()),
		}
		collector.log.Debug("adding piece to transfer queue.", zap.Stringer("Node ID", piece.NodeId),
			zap.String("path", path.Raw), zap.Int32("piece num", piece.GetPieceNum()),
			zap.Int32("num pieces", numPieces), zap.Int32("total possible pieces", pointer.GetRemote().GetRedundancy().GetTotal()))
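
For context, the one-line change above fixes a classic Go integer-division bug: numPieces and the redundancy total are both int32 (as the zap.Int32 log fields confirm), so the old expression divided and truncated the integers before converting to float64. Since a partially healthy segment has numPieces below the total, every ratio under 1 collapsed to 0. A minimal standalone sketch, with hypothetical values not taken from the commit:

package main

import "fmt"

func main() {
	numPieces := int32(3) // hypothetical: remaining pieces
	total := int32(4)     // hypothetical: redundancy total

	// Buggy order of operations: int32 / int32 truncates toward zero,
	// so 3 / 4 evaluates to 0 before the float64 conversion.
	buggy := float64(numPieces / total)

	// Fixed order: convert each operand first, then divide as float64.
	fixed := float64(numPieces) / float64(total)

	fmt.Println(buggy, fixed) // prints: 0 0.75
}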