satellite/audit: Fix flaky TestVerifierSlowDownload
TestVerifierSlowDownload would sometimes not have enough nodes finish within the allotted deadline. This change increases the deadline and no longer asserts that exactly 3 nodes have finished. Instead, in keeping with the purpose of the test, it asserts that the slow download is never counted as a success and is always counted as a pending audit in the final report.

Change-Id: I180734fcc4a499420c75164bad6253ed155d87de
parent 2b00d78add
commit 5a85e8d749
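The heart of the change is a membership check: rather than asserting exactly three successes, the test asserts that the slow node never appears among them. Below is a minimal standalone sketch of that check; containsNodeID is a hypothetical helper name (the actual diff inlines it as a closure), and it assumes report.Successes is a slice of storj.NodeID, as the closure's use of n.Bytes() suggests.

// containsNodeID reports whether id appears in ids, comparing raw node ID
// bytes the same way the test's inline closure does.
// Hypothetical helper for illustration; not part of the commit.
func containsNodeID(ids []storj.NodeID, id storj.NodeID) bool {
	for _, n := range ids {
		if bytes.Equal(n.Bytes(), id.Bytes()) {
			return true
		}
	}
	return false
}

With it, the new assertion would read assert.False(t, containsNodeID(report.Successes, slowNodeID)).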
@@ -4,6 +4,7 @@
 package audit_test
 
 import (
+	"bytes"
 	"context"
 	"testing"
 	"time"
@@ -774,7 +775,7 @@ func TestVerifierSlowDownload(t *testing.T) {
 			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
 				// These config values are chosen to force the slow node to time out without timing out on the three normal nodes
 				config.Audit.MinBytesPerSecond = 100 * memory.KiB
-				config.Audit.MinDownloadTimeout = 500 * time.Millisecond
+				config.Audit.MinDownloadTimeout = 950 * time.Millisecond
 
 				config.Metainfo.RS.MinThreshold = 2
 				config.Metainfo.RS.RepairThreshold = 2
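Note the timing arithmetic here: the slow node's injected download delay is 1 second (set further down in the test), so a 950ms MinDownloadTimeout still trips for the slow node while nearly doubling the 500ms window the three normal nodes previously had to finish.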
@@ -802,9 +803,9 @@ func TestVerifierSlowDownload(t *testing.T) {
 		pointer, err := satellite.Metainfo.Service.Get(ctx, path)
 		require.NoError(t, err)
 
-		slowNode := pointer.Remote.RemotePieces[0].NodeId
+		slowNodeID := pointer.Remote.RemotePieces[0].NodeId
 		for _, node := range planet.StorageNodes {
-			if node.ID() == slowNode {
+			if node.ID() == slowNodeID {
 				slowNodeDB := node.DB.(*testblobs.SlowDB)
 				// make downloads on storage node slower than the timeout on the satellite for downloading shares
 				delay := 1 * time.Second
@@ -816,12 +817,20 @@ func TestVerifierSlowDownload(t *testing.T) {
 		report, err := audits.Verifier.Verify(ctx, path, nil)
 		require.NoError(t, err)
 
-		require.Len(t, report.Successes, 3)
-		require.Len(t, report.Fails, 0)
-		require.Len(t, report.Offlines, 0)
-		require.Len(t, report.PendingAudits, 1)
-		require.Len(t, report.Unknown, 0)
-		require.Equal(t, report.PendingAudits[0].NodeID, slowNode)
+		// Assert the slowNodeID is not in the successes
+		assert.False(t, func() bool {
+			for _, n := range report.Successes {
+				if bytes.Equal(n.Bytes(), slowNodeID.Bytes()) {
+					return true
+				}
+			}
+			return false
+		}())
+		assert.Len(t, report.Fails, 0)
+		assert.Len(t, report.Offlines, 0)
+		assert.Len(t, report.Unknown, 0)
+		assert.Len(t, report.PendingAudits, 1)
+		assert.Equal(t, report.PendingAudits[0].NodeID, slowNodeID)
 	})
 }
 
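Note that testify's require aborts the test at the first failed expectation, while assert records the failure and continues; after this change a failing run reports every mismatched field of the report rather than only the first.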