satellite/metainfo: add cache expiration for project level rate limiting

Allow the rate limit project cache to expire so that project-level rate limit changes can be made without restarting the satellite process.

Change-Id: I159ea22edff5de7cbfcd13bfe70898dcef770e42
parent d30d2d920d
commit 149273c63f
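In other words, the per-project limiter is now cached only for a bounded time; once an entry expires, the project's rate limit is re-read, so an override written to the satellite database takes effect without a restart. Below is a minimal, self-contained sketch of that idea, not the satellite's actual lrucache code; the names projectLimiters, lookupRate, and limiterFor are hypothetical.

// A minimal, self-contained sketch of the idea (not the satellite's actual code):
// cache each project's limiter with a timestamp and rebuild it from a lookup
// (e.g. the projects table) once the entry is older than the configured expiration.
package sketch

import (
	"sync"
	"time"

	"golang.org/x/time/rate"
)

type cachedLimiter struct {
	limiter  *rate.Limiter
	cachedAt time.Time
}

type projectLimiters struct {
	mu         sync.Mutex
	entries    map[string]cachedLimiter
	expiration time.Duration                  // corresponds to the new CacheExpiration setting
	lookupRate func(projectID string) float64 // stand-in for the per-project override lookup
}

// limiterFor returns the cached limiter for a project, refreshing it from the
// lookup once the cached entry has expired, so changed limits are picked up
// without restarting the process.
func (p *projectLimiters) limiterFor(projectID string) *rate.Limiter {
	p.mu.Lock()
	defer p.mu.Unlock()

	entry, ok := p.entries[projectID]
	if !ok || time.Since(entry.cachedAt) > p.expiration {
		r := p.lookupRate(projectID)
		entry = cachedLimiter{limiter: rate.NewLimiter(rate.Limit(r), int(r)), cachedAt: time.Now()}
		p.entries[projectID] = entry
	}
	return entry.limiter
}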
@@ -321,6 +321,7 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
 					Enabled:         true,
 					Rate:            1000,
 					CacheCapacity:   100,
+					CacheExpiration: 10 * time.Second,
 				},
 			},
 			Orders: orders.Config{
@@ -42,6 +42,7 @@ type RateLimiterConfig struct {
 	Enabled         bool          `help:"whether rate limiting is enabled." releaseDefault:"true" devDefault:"true"`
 	Rate            float64       `help:"request rate per project per second." releaseDefault:"1000" devDefault:"100"`
 	CacheCapacity   int           `help:"number of projects to cache." releaseDefault:"10000" devDefault:"10"`
+	CacheExpiration time.Duration `help:"how long to cache the projects limiter." releaseDefault:"10m" devDefault:"10s"`
 }
 
 // Config is a configuration struct that is everything you need to start a metainfo
@@ -115,7 +115,10 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *DeletePiecesS
 		requiredRSConfig:  rsConfig,
 		satellite:         satellite,
 		maxCommitInterval: maxCommitInterval,
-		limiterCache:      lrucache.New(lrucache.Options{Capacity: limiterConfig.CacheCapacity}),
+		limiterCache: lrucache.New(lrucache.Options{
+			Capacity:   limiterConfig.CacheCapacity,
+			Expiration: limiterConfig.CacheExpiration,
+		}),
 		limiterConfig:     limiterConfig,
 	}
 }
@@ -1111,45 +1111,51 @@ func TestBatch(t *testing.T) {
 }
 
 func TestRateLimit(t *testing.T) {
+	rateLimit := 2
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
 			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-				config.Metainfo.RateLimiter.Rate = 2
+				config.Metainfo.RateLimiter.Rate = float64(rateLimit)
 			},
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		ul := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 
-		err := ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.Error(t, err)
+		var group errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		groupErrs := group.Wait()
+		require.Len(t, groupErrs, 1)
 	})
 }
 
 func TestRateLimit_Disabled(t *testing.T) {
+	rateLimit := 2
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
 			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
 				config.Metainfo.RateLimiter.Enabled = false
-				config.Metainfo.RateLimiter.Rate = 2
+				config.Metainfo.RateLimiter.Rate = float64(rateLimit)
 			},
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		ul := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 
-		err := ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
+		var group errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		groupErrs := group.Wait()
+		require.Len(t, groupErrs, 0)
 	})
 }
 
@@ -1175,13 +1181,66 @@ func TestRateLimit_ProjectRateLimitOverride(t *testing.T) {
 		err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
 		require.NoError(t, err)
 
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.Error(t, err)
+		var group errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		groupErrs := group.Wait()
+		require.Len(t, groupErrs, 1)
 	})
 }
+
+func TestRateLimit_ProjectRateLimitOverrideCachedExpired(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Metainfo.RateLimiter.Rate = 2
+				config.Metainfo.RateLimiter.CacheExpiration = 100 * time.Millisecond
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		ul := planet.Uplinks[0]
+		satellite := planet.Satellites[0]
+
+		projects, err := satellite.DB.Console().Projects().GetAll(ctx)
+		require.NoError(t, err)
+		require.Len(t, projects, 1)
+
+		rateLimit := 3
+		projects[0].RateLimit = &rateLimit
+
+		err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
+		require.NoError(t, err)
+
+		var group1 errs2.Group
+
+		for i := 0; i <= rateLimit; i++ {
+			group1.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		group1Errs := group1.Wait()
+		require.Len(t, group1Errs, 1)
+
+		rateLimit = 1
+		projects[0].RateLimit = &rateLimit
+
+		err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
+		require.NoError(t, err)
+
+		time.Sleep(200 * time.Millisecond)
+
+		var group2 errs2.Group
+
+		for i := 0; i <= rateLimit; i++ {
+			group2.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		group2Errs := group2.Wait()
+		require.Len(t, group2Errs, 1)
+	})
+}
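The assertion pattern in the tests above is that firing rateLimit+1 bucket creations in quick succession yields exactly one rejection when limiting is in effect, and none when it is disabled. Assuming the underlying limiter behaves like a token bucket along the lines of golang.org/x/time/rate with the burst set equal to the rate (an assumption; this diff does not show the limiter itself), that expectation can be checked directly:

// Illustration of the burst behavior the tests above rely on, assuming a
// token-bucket limiter with burst equal to the per-second limit: the first
// `limit` immediate calls are allowed and the remainder are rejected.
package sketch

import "golang.org/x/time/rate"

// rejectedCount reports how many of `requests` back-to-back calls a fresh
// limiter with the given per-second limit (and equal burst) would reject.
func rejectedCount(limit, requests int) int {
	limiter := rate.NewLimiter(rate.Limit(limit), limit)
	rejected := 0
	for i := 0; i < requests; i++ {
		if !limiter.Allow() {
			rejected++
		}
	}
	return rejected // limit=2, requests=3 => 1, matching require.Len(t, groupErrs, 1)
}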
scripts/testdata/satellite-config.yaml.lock (vendored, 3 changes)
@@ -250,6 +250,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # number of projects to cache.
 # metainfo.rate-limiter.cache-capacity: 10000
 
+# how long to cache the projects limiter.
+# metainfo.rate-limiter.cache-expiration: 10m0s
+
 # whether rate limiting is enabled.
 # metainfo.rate-limiter.enabled: true
 
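A small aside on the default recorded here: the lock file shows 10m0s while the struct tag's releaseDefault is "10m"; both denote the same Go duration, since Duration.String() renders ten minutes as "10m0s". A quick check:

// Quick check that "10m" (the releaseDefault on CacheExpiration) and the
// "10m0s" written to the config lock file are the same duration.
package sketch

import (
	"fmt"
	"time"
)

func printDefaultExpiration() {
	d, err := time.ParseDuration("10m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // prints "10m0s"
}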