satellite/metainfo: remove sleep from upload limit test

lrucache now uses the time2 package, so we can make the expiration
test work without resorting to time.Sleep.

https://github.com/storj/storj/issues/5788

Change-Id: I48f2693c3db78fcf4e30e618bb3304be3625100c
This commit is contained in:
Michal Niewrzal 2023-04-18 09:37:36 +02:00
parent d53a56cef6
commit 114eda6e87

View File

@ -28,6 +28,7 @@ import (
"storj.io/common/storj" "storj.io/common/storj"
"storj.io/common/testcontext" "storj.io/common/testcontext"
"storj.io/common/testrand" "storj.io/common/testrand"
"storj.io/common/time2"
"storj.io/common/uuid" "storj.io/common/uuid"
"storj.io/storj/private/testplanet" "storj.io/storj/private/testplanet"
"storj.io/storj/satellite" "storj.io/storj/satellite"
@ -674,41 +675,46 @@ func TestEndpoint_Object_UploadLimit(t *testing.T) {
}, },
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) { }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()] apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
endpoint := planet.Satellites[0].Metainfo.Endpoint
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
bucketName := "testbucket" bucketName := "testbucket"
deleteBucket := func() error {
_, err := metainfoClient.DeleteBucket(ctx, metaclient.DeleteBucketParams{ err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
Name: []byte(bucketName), require.NoError(t, err)
DeleteAll: true,
})
return err
}
t.Run("limit single object upload", func(t *testing.T) { t.Run("limit single object upload", func(t *testing.T) {
defer ctx.Check(deleteBucket)
now := time.Now()
request := &pb.BeginObjectRequest{
Header: &pb.RequestHeader{
ApiKey: apiKey.SerializeRaw(),
},
Bucket: []byte(bucketName),
EncryptedObjectKey: []byte("single-object"),
EncryptionParameters: &pb.EncryptionParameters{
CipherSuite: pb.CipherSuite_ENC_AESGCM,
},
}
// upload to the same location one by one should fail // upload to the same location one by one should fail
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "single-object", []byte("test")) _, err := endpoint.BeginObject(ctx, request)
require.NoError(t, err) require.NoError(t, err)
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "single-object", []byte("test")) _, err = endpoint.BeginObject(ctx, request)
require.Error(t, err) require.Error(t, err)
require.True(t, errs2.IsRPC(err, rpcstatus.ResourceExhausted)) require.True(t, errs2.IsRPC(err, rpcstatus.ResourceExhausted))
time.Sleep(500 * time.Millisecond) ctx, _ := time2.WithNewMachine(ctx, time2.WithTimeAt(now.Add(250*time.Millisecond)))
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "single-object", []byte("test")) _, err = endpoint.BeginObject(ctx, request)
require.NoError(t, err) require.NoError(t, err)
// upload to different locations one by one should NOT fail // upload to different locations one by one should NOT fail
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "single-objectA", []byte("test")) request.EncryptedObjectKey = []byte("single-objectA")
_, err = endpoint.BeginObject(ctx, request)
require.NoError(t, err) require.NoError(t, err)
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "single-objectB", []byte("test")) request.EncryptedObjectKey = []byte("single-objectB")
_, err = endpoint.BeginObject(ctx, request)
require.NoError(t, err) require.NoError(t, err)
}) })
}) })