Ensure checker tests run against postgres (#1229)

Egon Elbre authored on 2019-02-05 18:00:52 +02:00; committed by GitHub
parent 94ee200e35
commit 39c1e5ccec
2 changed files with 110 additions and 187 deletions
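
Both files below move onto shared test harnesses so the same assertions run against whichever satellite database the environment provides, postgres included: the checker tests switch from hand-rolled testplanet setup to testplanet.Run, and the irreparable test extracts its body into a testDatabase helper that takes the database as a parameter. As a rough, self-contained sketch of the runner pattern (Database, Run, and TEST_POSTGRES_URL are hypothetical stand-ins, not the actual testplanet or satellitedbtest APIs):

package dbtest

import (
	"os"
	"testing"
)

// Database describes one backend to exercise.
type Database struct {
	Name string
	URL  string
}

// databases lists the backends to run against: an in-memory default,
// plus postgres when the environment opts in with a connection string.
func databases() []Database {
	dbs := []Database{{Name: "sqlite", URL: "sqlite3://file::memory:?mode=memory"}}
	if url := os.Getenv("TEST_POSTGRES_URL"); url != "" {
		dbs = append(dbs, Database{Name: "postgres", URL: url})
	}
	return dbs
}

// Run executes the same test body once per backend as a named subtest,
// so a postgres-only failure is reported as TestXxx/postgres.
func Run(t *testing.T, test func(t *testing.T, db Database)) {
	for _, db := range databases() {
		db := db // capture loop variable for the subtest closure
		t.Run(db.Name, func(t *testing.T) {
			test(t, db)
		})
	}
}

Running the body as named subtests is what makes "ensure tests run against postgres" checkable in CI: each backend gets its own pass/fail line.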


@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 
 	"storj.io/storj/internal/testcontext"
 	"storj.io/storj/internal/testplanet"
@@ -19,162 +18,89 @@ import (
 
 func TestIdentifyInjuredSegments(t *testing.T) {
 	// TODO note satellite's: own sub-systems need to be disabled
 	// TODO test irreparable ??
-	tctx := testcontext.New(t)
-	defer tctx.Cleanup()
-
-	const numberOfNodes = 10
-	planet, err := testplanet.New(t, 1, 4, 0)
-	require.NoError(t, err)
-	defer tctx.Check(planet.Shutdown)
-
-	planet.Start(tctx)
-	time.Sleep(2 * time.Second)
-
-	pieces := make([]*pb.RemotePiece, 0, numberOfNodes)
-	// use online nodes
-	for i, storagenode := range planet.StorageNodes {
-		pieces = append(pieces, &pb.RemotePiece{
-			PieceNum: int32(i),
-			NodeId:   storagenode.Identity.ID,
-		})
-	}
-
-	// simulate offline nodes
-	expectedLostPieces := make(map[int32]bool)
-	for i := len(pieces); i < numberOfNodes; i++ {
-		pieces = append(pieces, &pb.RemotePiece{
-			PieceNum: int32(i),
-			NodeId:   storj.NodeID{byte(i)},
-		})
-		expectedLostPieces[int32(i)] = true
-	}
-
-	pointer := &pb.Pointer{
-		Remote: &pb.RemoteSegment{
-			Redundancy: &pb.RedundancyScheme{
-				MinReq:          int32(4),
-				RepairThreshold: int32(8),
-			},
-			PieceId:      "fake-piece-id",
-			RemotePieces: pieces,
-		},
-	}
-
-	// put test pointer to db
-	pointerdb := planet.Satellites[0].Metainfo.Service
-	err = pointerdb.Put(pointer.Remote.PieceId, pointer)
-	assert.NoError(t, err)
-
-	checker := planet.Satellites[0].Repair.Checker
-	err = checker.IdentifyInjuredSegments(tctx)
-	assert.NoError(t, err)
-
-	//check if the expected segments were added to the queue
-	repairQueue := planet.Satellites[0].DB.RepairQueue()
-	injuredSegment, err := repairQueue.Dequeue(tctx)
-	assert.NoError(t, err)
-
-	assert.Equal(t, "fake-piece-id", injuredSegment.Path)
-	assert.Equal(t, len(expectedLostPieces), len(injuredSegment.LostPieces))
-	for _, lostPiece := range injuredSegment.LostPieces {
-		if !expectedLostPieces[lostPiece] {
-			t.Error("should be lost: ", lostPiece)
-		}
-	}
-}
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		time.Sleep(2 * time.Second)
+
+		const numberOfNodes = 10
+		pieces := make([]*pb.RemotePiece, 0, numberOfNodes)
+		// use online nodes
+		for i, storagenode := range planet.StorageNodes {
+			pieces = append(pieces, &pb.RemotePiece{
+				PieceNum: int32(i),
+				NodeId:   storagenode.Identity.ID,
+			})
+		}
+
+		// simulate offline nodes
+		expectedLostPieces := make(map[int32]bool)
+		for i := len(pieces); i < numberOfNodes; i++ {
+			pieces = append(pieces, &pb.RemotePiece{
+				PieceNum: int32(i),
+				NodeId:   storj.NodeID{byte(i)},
+			})
+			expectedLostPieces[int32(i)] = true
+		}
+
+		pointer := &pb.Pointer{
+			Remote: &pb.RemoteSegment{
+				Redundancy: &pb.RedundancyScheme{
+					MinReq:          int32(4),
+					RepairThreshold: int32(8),
+				},
+				PieceId:      "fake-piece-id",
+				RemotePieces: pieces,
+			},
+		}
+
+		// put test pointer to db
+		pointerdb := planet.Satellites[0].Metainfo.Service
+		err := pointerdb.Put(pointer.Remote.PieceId, pointer)
+		assert.NoError(t, err)
+
+		checker := planet.Satellites[0].Repair.Checker
+		err = checker.IdentifyInjuredSegments(ctx)
+		assert.NoError(t, err)
+
+		//check if the expected segments were added to the queue
+		repairQueue := planet.Satellites[0].DB.RepairQueue()
+		injuredSegment, err := repairQueue.Dequeue(ctx)
+		assert.NoError(t, err)
+
+		assert.Equal(t, "fake-piece-id", injuredSegment.Path)
+		assert.Equal(t, len(expectedLostPieces), len(injuredSegment.LostPieces))
+		for _, lostPiece := range injuredSegment.LostPieces {
+			if !expectedLostPieces[lostPiece] {
+				t.Error("should be lost: ", lostPiece)
+			}
+		}
+	})
+}
 
 func TestOfflineNodes(t *testing.T) {
-	tctx := testcontext.New(t)
-	defer tctx.Cleanup()
-
-	planet, err := testplanet.New(t, 1, 4, 0)
-	require.NoError(t, err)
-	defer tctx.Check(planet.Shutdown)
-
-	const numberOfNodes = 10
-	nodeIDs := storj.NodeIDList{}
-
-	planet.Start(tctx)
-	time.Sleep(2 * time.Second)
-
-	// use online nodes
-	for _, storagenode := range planet.StorageNodes {
-		nodeIDs = append(nodeIDs, storagenode.Identity.ID)
-	}
-
-	// simulate offline nodes
-	expectedOffline := make([]int32, 0)
-	for i := len(nodeIDs); i < numberOfNodes; i++ {
-		nodeIDs = append(nodeIDs, storj.NodeID{byte(i)})
-		expectedOffline = append(expectedOffline, int32(i))
-	}
-
-	checker := planet.Satellites[0].Repair.Checker
-	offline, err := checker.OfflineNodes(tctx, nodeIDs)
-	assert.NoError(t, err)
-	assert.Equal(t, expectedOffline, offline)
-}
-
-func BenchmarkIdentifyInjuredSegments(b *testing.B) {
-	tctx := testcontext.New(b)
-	defer tctx.Cleanup()
-
-	const numberOfNodes = 10
-	planet, err := testplanet.New(b, 1, 4, 0)
-	require.NoError(b, err)
-	defer tctx.Check(planet.Shutdown)
-
-	planet.Start(tctx)
-	time.Sleep(2 * time.Second)
-
-	pieces := make([]*pb.RemotePiece, 0, numberOfNodes)
-	// use online nodes
-	for i, storagenode := range planet.StorageNodes {
-		pieces = append(pieces, &pb.RemotePiece{
-			PieceNum: int32(i),
-			NodeId:   storagenode.Identity.ID,
-		})
-	}
-
-	// simulate offline nodes
-	expectedLostPieces := make(map[int32]bool)
-	for i := len(pieces); i < numberOfNodes; i++ {
-		pieces = append(pieces, &pb.RemotePiece{
-			PieceNum: int32(i),
-			NodeId:   storj.NodeID{byte(i)},
-		})
-		expectedLostPieces[int32(i)] = true
-	}
-
-	pointer := &pb.Pointer{
-		Remote: &pb.RemoteSegment{
-			Redundancy: &pb.RedundancyScheme{
-				MinReq:          int32(4),
-				RepairThreshold: int32(8),
-			},
-			PieceId:      "fake-piece-id",
-			RemotePieces: pieces,
-		},
-	}
-
-	pointerdb := planet.Satellites[0].Metainfo.Service
-	err = pointerdb.Put(pointer.Remote.PieceId, pointer)
-	assert.NoError(b, err)
-
-	repairQueue := planet.Satellites[0].DB.RepairQueue()
-	checker := planet.Satellites[0].Repair.Checker
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		err = checker.IdentifyInjuredSegments(tctx)
-		assert.NoError(b, err)
-
-		injuredSegment, err := repairQueue.Dequeue(tctx)
-		assert.NoError(b, err)
-
-		assert.Equal(b, "fake-piece-id", injuredSegment.Path)
-		assert.Equal(b, len(expectedLostPieces), len(injuredSegment.LostPieces))
-		for _, lostPiece := range injuredSegment.LostPieces {
-			assert.Equal(b, true, expectedLostPieces[lostPiece])
-		}
-	}
-}
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		time.Sleep(2 * time.Second)
+
+		const numberOfNodes = 10
+		nodeIDs := storj.NodeIDList{}
+
+		// use online nodes
+		for _, storagenode := range planet.StorageNodes {
+			nodeIDs = append(nodeIDs, storagenode.Identity.ID)
+		}
+
+		// simulate offline nodes
+		expectedOffline := make([]int32, 0)
+		for i := len(nodeIDs); i < numberOfNodes; i++ {
+			nodeIDs = append(nodeIDs, storj.NodeID{byte(i)})
+			expectedOffline = append(expectedOffline, int32(i))
+		}
+
+		checker := planet.Satellites[0].Repair.Checker
+		offline, err := checker.OfflineNodes(ctx, nodeIDs)
+		assert.NoError(t, err)
+		assert.Equal(t, expectedOffline, offline)
+	})
+}
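
Two details of this rewrite are easy to miss. Inside the testplanet.Run closure the context and the planet are supplied by the harness, so the hand-rolled lifecycle code (testcontext.New, testplanet.New, planet.Start, the Shutdown check) disappears, and the first error assignment becomes a declaration (err := pointerdb.Put(...)) because no earlier err exists in the closure. BenchmarkIdentifyInjuredSegments is deleted rather than ported, presumably because testplanet.Run accepts a *testing.T and a benchmark would need a separate runner.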


@@ -4,7 +4,6 @@
 package irreparable_test
 
 import (
+	"context"
 	"testing"
 	"time"
@@ -21,40 +20,38 @@ func TestIrreparable(t *testing.T) {
 		ctx := testcontext.New(t)
 		defer ctx.Cleanup()
 
-		irrdb := db.Irreparable()
-
-		//testing variables
-		segmentInfo := &irreparable.RemoteSegmentInfo{
-			EncryptedSegmentPath:   []byte("IamSegmentkeyinfo"),
-			EncryptedSegmentDetail: []byte("IamSegmentdetailinfo"),
-			LostPiecesCount:        int64(10),
-			RepairUnixSec:          time.Now().Unix(),
-			RepairAttemptCount:     int64(10),
-		}
-
-		{ // New entry
-			err := irrdb.IncrementRepairAttempts(ctx, segmentInfo)
-			assert.NoError(t, err)
-		}
-
-		{ //Create the already existing entry
-			err := irrdb.IncrementRepairAttempts(ctx, segmentInfo)
-			assert.NoError(t, err)
-			segmentInfo.RepairAttemptCount++
-
-			dbxInfo, err := irrdb.Get(ctx, segmentInfo.EncryptedSegmentPath)
-			assert.NoError(t, err)
-			assert.Equal(t, segmentInfo, dbxInfo)
-		}
-
-		{ //Delete existing entry
-			err := irrdb.Delete(ctx, segmentInfo.EncryptedSegmentPath)
-			assert.NoError(t, err)
-
-			_, err = irrdb.Get(ctx, segmentInfo.EncryptedSegmentPath)
-			assert.Error(t, err)
-		}
+		testDatabase(ctx, t, db.Irreparable())
 	})
 }
+
+func testDatabase(ctx context.Context, t *testing.T, irrdb irreparable.DB) {
+	//testing variables
+	segmentInfo := &irreparable.RemoteSegmentInfo{
+		EncryptedSegmentPath:   []byte("IamSegmentkeyinfo"),
+		EncryptedSegmentDetail: []byte("IamSegmentdetailinfo"),
+		LostPiecesCount:        int64(10),
+		RepairUnixSec:          time.Now().Unix(),
+		RepairAttemptCount:     int64(10),
+	}
+
+	{ // New entry
+		err := irrdb.IncrementRepairAttempts(ctx, segmentInfo)
+		assert.NoError(t, err)
+	}
+
+	{ //Create the already existing entry
+		err := irrdb.IncrementRepairAttempts(ctx, segmentInfo)
+		assert.NoError(t, err)
+		segmentInfo.RepairAttemptCount++
+
+		dbxInfo, err := irrdb.Get(ctx, segmentInfo.EncryptedSegmentPath)
+		assert.NoError(t, err)
+		assert.Equal(t, segmentInfo, dbxInfo)
+	}
+
+	{ //Delete existing entry
+		err := irrdb.Delete(ctx, segmentInfo.EncryptedSegmentPath)
+		assert.NoError(t, err)
+
+		_, err = irrdb.Get(ctx, segmentInfo.EncryptedSegmentPath)
+		assert.Error(t, err)
+	}
+}
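
With the assertions extracted into testDatabase, any fixture that can produce an irreparable.DB can reuse them. A sketch of such a caller follows; the wrapper name and signature (satellitedbtest.Run taking a satellite.DB) are assumed from the unchanged `})` context lines above, not confirmed by this diff:

// Hypothetical caller: run the shared assertions against each configured
// satellite database implementation, postgres included.
func TestIrreparableOverEachDB(t *testing.T) {
	satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
		ctx := testcontext.New(t)
		defer ctx.Cleanup()

		testDatabase(ctx, t, db.Irreparable())
	})
}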