make tests run faster (#1553)

Egon Elbre 2019-03-22 15:14:17 +02:00 committed by GitHub
parent 30dfc2b20c
commit 694b6dc1da
12 changed files with 88 additions and 34 deletions

View File

@@ -65,6 +65,7 @@ matrix:
- go run ./scripts/protobuf.go --protoc=$HOME/protoc/bin/protoc lint
- protolock status
- golangci-lint run
+- ./scripts/check-dbx-version.sh
- ./scripts/check-travis-tidy.sh
### integration tests ###

View File

@@ -437,8 +437,9 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
MaxBufferMem: 4 * memory.MiB,
},
Audit: audit.Config{
-MaxRetriesStatDB: 0,
-Interval:         30 * time.Second,
+MaxRetriesStatDB:  0,
+Interval:          30 * time.Second,
+MinBytesPerSecond: 1 * memory.KB,
},
Tally: tally.Config{
Interval: 30 * time.Second,

View File

@@ -19,13 +19,13 @@ func TestUploadDownload(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
-planet, err := testplanet.New(t, 1, 10, 1)
+planet, err := testplanet.New(t, 1, 6, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
-expectedData := make([]byte, 5*memory.MiB)
+expectedData := make([]byte, 1*memory.MiB)
_, err = rand.Read(expectedData)
assert.NoError(t, err)

View File

@@ -12,32 +12,24 @@ import (
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testplanet"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite"
ul "storj.io/storj/uplink"
)
func TestUplink(t *testing.T) {
-// Planet Config for Uplink
-testplanetConfig := testplanet.Config{
-SatelliteCount: 1,
-StorageNodeCount: 20,
-UplinkCount: 1,
-}
-// Run Tests
-testplanet.Run(t, testplanetConfig, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+testplanet.Run(t, testplanet.Config{
+SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
+}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
-identity, err := identity.NewFullIdentity(ctx, 12, 4)
-assert.NoError(t, err)
satelliteAddr := satellite.Addr() // get address
cfg := getConfig(satellite, planet)
-uplink := NewUplink(identity, satelliteAddr, cfg)
+uplink := NewUplink(planet.Uplinks[0].Identity, satelliteAddr, cfg)
permissions := Permissions{}
access := uplink.Access(ctx, permissions)
assert.NoError(t, err)
opts := CreateBucketOptions{}
bucket, err := access.CreateBucket(ctx, "testbucket", opts)
@@ -56,7 +48,6 @@ func TestUplink(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, storjBucket)
assert.Equal(t, storjBucket.Name, "testbucket")
-assert.IsType(t, storj.Bucket{}, storjBucket)
encOpts := &Encryption{}
getbucket := access.GetBucket(ctx, "testbucket", encOpts)
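
Most of the test changes in this commit follow the pattern of the refactor above: let testplanet.Run build, start, and tear down the whole fixture, and shrink the planet to the smallest size the test still tolerates. A minimal sketch of that shape, with an illustrative test name and assertions that are not taken from the diff:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testplanet"
)

func TestSmallPlanet(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// Run starts the planet and shuts it down when the callback returns,
		// so the test body only works with the pre-built peers.
		require.Len(t, planet.StorageNodes, 6)
		require.NotNil(t, planet.Satellites[0])
		require.NotNil(t, planet.Uplinks[0])
	})
}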

View File

@@ -19,14 +19,14 @@ import (
func TestVerifierHappyPath(t *testing.T) {
testplanet.Run(t, testplanet.Config{
-SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
+SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
err := planet.Satellites[0].Audit.Service.Close()
assert.NoError(t, err)
uplink := planet.Uplinks[0]
-testData := make([]byte, 5*memory.MiB)
+testData := make([]byte, 1*memory.MiB)
_, err = rand.Read(testData)
assert.NoError(t, err)

View File

@@ -24,14 +24,14 @@ import (
// receive data back from a storage node.
func TestGetShareTimeout(t *testing.T) {
testplanet.Run(t, testplanet.Config{
-SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
+SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
err := planet.Satellites[0].Audit.Service.Close()
assert.NoError(t, err)
uplink := planet.Uplinks[0]
-testData := make([]byte, 5*memory.MiB)
+testData := make([]byte, 1*memory.MiB)
_, err = rand.Read(testData)
assert.NoError(t, err)

View File

@@ -157,9 +157,13 @@ func (d *defaultDownloader) getShare(ctx context.Context, limit *pb.AddressedOrd
bandwidthMsgSize := shareSize
// determines number of seconds allotted for receiving data from a storage node
-seconds := time.Duration(int32(time.Second) * bandwidthMsgSize / d.minBytesPerSecond.Int32())
-timedCtx, cancel := context.WithTimeout(ctx, seconds)
-defer cancel()
+timedCtx := ctx
+if d.minBytesPerSecond > 0 {
+	maxTransferTime := time.Duration(int32(time.Second) * bandwidthMsgSize / d.minBytesPerSecond.Int32())
+	var cancel func()
+	timedCtx, cancel = context.WithTimeout(ctx, maxTransferTime)
+	defer cancel()
+}
storageNodeID := limit.GetLimit().StorageNodeId
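
The guard above is what lets tests opt out of the per-share deadline: when minBytesPerSecond is zero the original context is used unchanged, and when it is positive the deadline grows linearly with the share size. A small standalone sketch of the same idea follows; the helper name and the sample numbers are illustrative and not part of the repository.

package main

import (
	"context"
	"fmt"
	"time"
)

// shareTimeout mirrors the guarded-deadline pattern sketched above: no deadline
// when the minimum transfer rate is zero (timeouts disabled, e.g. in fast unit
// tests), otherwise a deadline proportional to the number of bytes expected.
func shareTimeout(ctx context.Context, sizeBytes, minBytesPerSecond int64) (context.Context, context.CancelFunc) {
	if minBytesPerSecond <= 0 {
		return ctx, func() {}
	}
	maxTransferTime := time.Duration(sizeBytes * int64(time.Second) / minBytesPerSecond)
	return context.WithTimeout(ctx, maxTransferTime)
}

func main() {
	// 1024 bytes at a 1000 B/s floor gives a deadline of roughly one second.
	ctx, cancel := shareTimeout(context.Background(), 1024, 1000)
	defer cancel()
	deadline, ok := ctx.Deadline()
	fmt.Println(ok, time.Until(deadline).Round(time.Millisecond))
}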

View File

@@ -14,7 +14,7 @@ import (
func TestCache_Refresh(t *testing.T) {
testplanet.Run(t, testplanet.Config{
-SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 0,
+SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
for _, storageNode := range planet.StorageNodes {
@@ -28,7 +28,7 @@ func TestCache_Refresh(t *testing.T) {
func TestCache_Graveyard(t *testing.T) {
testplanet.Run(t, testplanet.Config{
-SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 0,
+SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
testnode := planet.StorageNodes[0]

View File

@@ -30,7 +30,9 @@ func TestSegmentStoreRepair(t *testing.T) {
ul := planet.Uplinks[0]
satellite := planet.Satellites[0]
-testData := make([]byte, 5*memory.MiB)
+satellite.Repair.Checker.Loop.Stop()
+testData := make([]byte, 1*memory.MiB)
_, err := rand.Read(testData)
assert.NoError(t, err)

View File

@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
@@ -2814,10 +2813,54 @@ func __sqlbundle_Render(dialect __sqlbundle_Dialect, sql __sqlbundle_SQL, ops ..
return dialect.Rebind(out)
}
-var __sqlbundle_reSpace = regexp.MustCompile(`\s+`)
-func __sqlbundle_flattenSQL(s string) string {
-	return strings.TrimSpace(__sqlbundle_reSpace.ReplaceAllString(s, " "))
-}
+func __sqlbundle_flattenSQL(x string) string {
+	// trim whitespace from beginning and end
+	s, e := 0, len(x)-1
+	for s < len(x) && (x[s] == ' ' || x[s] == '\t' || x[s] == '\n') {
+		s++
+	}
+	for s <= e && (x[e] == ' ' || x[e] == '\t' || x[e] == '\n') {
+		e--
+	}
+	if s > e {
+		return ""
+	}
+	x = x[s : e+1]
+	// check for whitespace that needs fixing
+	wasSpace := false
+	for i := 0; i < len(x); i++ {
+		r := x[i]
+		justSpace := r == ' '
+		if (wasSpace && justSpace) || r == '\t' || r == '\n' {
+			// whitespace detected, start writing a new string
+			var result strings.Builder
+			result.Grow(len(x))
+			if wasSpace {
+				result.WriteString(x[:i-1])
+			} else {
+				result.WriteString(x[:i])
+			}
+			for p := i; p < len(x); p++ {
+				for p < len(x) && (x[p] == ' ' || x[p] == '\t' || x[p] == '\n') {
+					p++
+				}
+				result.WriteByte(' ')
+				start := p
+				for p < len(x) && !(x[p] == ' ' || x[p] == '\t' || x[p] == '\n') {
+					p++
+				}
+				result.WriteString(x[start:p])
+			}
+			return result.String()
+		}
+		wasSpace = justSpace
+	}
+	// no problematic whitespace found
+	return x
+}
// this type is specially named to match up with the name returned by the
@@ -2896,6 +2939,8 @@ type __sqlbundle_Condition struct {
func (*__sqlbundle_Condition) private() {}
func (c *__sqlbundle_Condition) Render() string {
+// TODO(jeff): maybe check if we can use placeholders instead of the
+// literal null: this would make the templates easier.
switch {
case c.Equal && c.Null:
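
The regenerated __sqlbundle_flattenSQL above drops the package-level regexp in favour of a hand-rolled scan: leading and trailing whitespace is trimmed, every interior run of spaces, tabs, and newlines collapses to a single space, and an already-flat string is returned as-is without allocating, which presumably is the point of the rewrite given the commit title. A rough reference implementation of that expected behaviour, written only to illustrate the contract (it is not the generated code, and strings.Fields splits on all Unicode whitespace rather than just space, tab, and newline):

package main

import (
	"fmt"
	"strings"
)

// flattenReference collapses whitespace the simple way, as a behavioural
// stand-in for the generated function: trim the input and join its fields
// with single spaces.
func flattenReference(s string) string {
	return strings.Join(strings.Fields(s), " ")
}

func main() {
	fmt.Println(flattenReference("SELECT *\n\tFROM users\n\tWHERE id = ?"))
	// prints: SELECT * FROM users WHERE id = ?
	fmt.Println(flattenReference("  already flat  "))
	// prints: already flat
}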

View File

@@ -526,7 +526,7 @@ func (m *lockedIrreparable) Get(ctx context.Context, segmentPath []byte) (*pb.Ir
return m.db.Get(ctx, segmentPath)
}
-// GetLimited gets a limited number of irreparable segments by offset
+// GetLimited number of segments from offset
func (m *lockedIrreparable) GetLimited(ctx context.Context, limit int, offset int64) ([]*pb.IrreparableSegment, error) {
m.Lock()
defer m.Unlock()

scripts/check-dbx-version.sh (new executable file, 10 additions)
View File

@@ -0,0 +1,10 @@
#!/bin/bash
CHANGES=$(grep -r --include="*.dbx.go" regexp.MustCompile .)
if [ -z "$CHANGES" ]
then
	echo "dbx version ok"
else
	echo "please use latest dbx tool to generate code"
	exit 1
fi