satellite/metainfo: add cache expiration for project level rate limiting

Allow the project rate-limit cache to expire so that project-level rate limit changes take effect without restarting the satellite process.

Change-Id: I159ea22edff5de7cbfcd13bfe70898dcef770e42
Author: Ethan (committed by Ethan Adams)
Date: 2020-01-29 10:22:22 -05:00
Parent: d30d2d920d
Commit: 149273c63f
5 changed files with 97 additions and 30 deletions
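Background for the change: the metainfo endpoint keeps each project's limiter in an in-memory cache so it does not have to consult the projects table on every request. Before this commit a cached limiter lived until the process exited, so changing a project's rate limit in the database only took effect after a restart. The sketch below is only an illustration of the idea, not the satellite's actual code (the type names, the map-based cache, and the use of golang.org/x/time/rate are assumptions): each cached entry remembers when it was built, and a lookup older than the configured expiration rebuilds the limiter from the stored project limit.

    package sketch

    import (
    	"sync"
    	"time"

    	"golang.org/x/time/rate"
    )

    // entry pairs a project's limiter with the time it was built.
    type entry struct {
    	limiter  *rate.Limiter
    	cachedAt time.Time
    }

    // expiringLimiters is an illustrative per-project limiter cache with expiration.
    type expiringLimiters struct {
    	mu         sync.Mutex
    	expiration time.Duration
    	entries    map[string]entry
    }

    // get returns the cached limiter for projectID, rebuilding it via load
    // (e.g. a database read of the project's rate-limit override) once the
    // cached entry is older than the configured expiration.
    func (c *expiringLimiters) get(projectID string, load func() *rate.Limiter) *rate.Limiter {
    	c.mu.Lock()
    	defer c.mu.Unlock()

    	if c.entries == nil {
    		c.entries = map[string]entry{}
    	}
    	if e, ok := c.entries[projectID]; ok && time.Since(e.cachedAt) < c.expiration {
    		return e.limiter
    	}
    	limiter := load()
    	c.entries[projectID] = entry{limiter: limiter, cachedAt: time.Now()}
    	return limiter
    }

With the release default of 10m introduced below, a project-level override therefore becomes effective within ten minutes at the latest; the real cache additionally bounds memory via CacheCapacity.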

@@ -318,9 +318,10 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
 				CoalesceDuration: 1 * time.Second,
 			},
 			RateLimiter: metainfo.RateLimiterConfig{
-				Enabled:       true,
-				Rate:          1000,
-				CacheCapacity: 100,
+				Enabled:         true,
+				Rate:            1000,
+				CacheCapacity:   100,
+				CacheExpiration: 10 * time.Second,
 			},
 		},
 		Orders: orders.Config{

@@ -39,9 +39,10 @@ type RSConfig struct {
 // RateLimiterConfig is a configuration struct for endpoint rate limiting
 type RateLimiterConfig struct {
-	Enabled       bool    `help:"whether rate limiting is enabled." releaseDefault:"true" devDefault:"true"`
-	Rate          float64 `help:"request rate per project per second." releaseDefault:"1000" devDefault:"100"`
-	CacheCapacity int     `help:"number of projects to cache." releaseDefault:"10000" devDefault:"10"`
+	Enabled         bool          `help:"whether rate limiting is enabled." releaseDefault:"true" devDefault:"true"`
+	Rate            float64       `help:"request rate per project per second." releaseDefault:"1000" devDefault:"100"`
+	CacheCapacity   int           `help:"number of projects to cache." releaseDefault:"10000" devDefault:"10"`
+	CacheExpiration time.Duration `help:"how long to cache the projects limiter." releaseDefault:"10m" devDefault:"10s"`
 }
 
 // Config is a configuration struct that is everything you need to start a metainfo

@@ -115,8 +115,11 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *DeletePiecesS
 		requiredRSConfig:  rsConfig,
 		satellite:         satellite,
 		maxCommitInterval: maxCommitInterval,
-		limiterCache:      lrucache.New(lrucache.Options{Capacity: limiterConfig.CacheCapacity}),
-		limiterConfig:     limiterConfig,
+		limiterCache: lrucache.New(lrucache.Options{
+			Capacity:   limiterConfig.CacheCapacity,
+			Expiration: limiterConfig.CacheExpiration,
+		}),
+		limiterConfig: limiterConfig,
 	}
 }

@@ -1111,45 +1111,51 @@ func TestBatch(t *testing.T) {
 }
 
 func TestRateLimit(t *testing.T) {
+	rateLimit := 2
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
 			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-				config.Metainfo.RateLimiter.Rate = 2
+				config.Metainfo.RateLimiter.Rate = float64(rateLimit)
 			},
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		ul := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 
-		err := ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.Error(t, err)
+		var group errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		groupErrs := group.Wait()
+		require.Len(t, groupErrs, 1)
 	})
 }
 
 func TestRateLimit_Disabled(t *testing.T) {
+	rateLimit := 2
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
 			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
 				config.Metainfo.RateLimiter.Enabled = false
-				config.Metainfo.RateLimiter.Rate = 2
+				config.Metainfo.RateLimiter.Rate = float64(rateLimit)
 			},
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		ul := planet.Uplinks[0]
 		satellite := planet.Satellites[0]
 
-		err := ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
+		var group errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		groupErrs := group.Wait()
+		require.Len(t, groupErrs, 0)
 	})
 }
@@ -1175,13 +1181,66 @@ func TestRateLimit_ProjectRateLimitOverride(t *testing.T) {
 		err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
 		require.NoError(t, err)
 
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.NoError(t, err)
-		err = ul.CreateBucket(ctx, satellite, testrand.BucketName())
-		require.Error(t, err)
+		var group errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		groupErrs := group.Wait()
+		require.Len(t, groupErrs, 1)
 	})
 }
+
+func TestRateLimit_ProjectRateLimitOverrideCachedExpired(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Metainfo.RateLimiter.Rate = 2
+				config.Metainfo.RateLimiter.CacheExpiration = 100 * time.Millisecond
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		ul := planet.Uplinks[0]
+		satellite := planet.Satellites[0]
+
+		projects, err := satellite.DB.Console().Projects().GetAll(ctx)
+		require.NoError(t, err)
+		require.Len(t, projects, 1)
+
+		rateLimit := 3
+		projects[0].RateLimit = &rateLimit
+
+		err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
+		require.NoError(t, err)
+
+		var group1 errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group1.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		group1Errs := group1.Wait()
+		require.Len(t, group1Errs, 1)
+
+		rateLimit = 1
+		projects[0].RateLimit = &rateLimit
+
+		err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
+		require.NoError(t, err)
+
+		time.Sleep(200 * time.Millisecond)
+
+		var group2 errs2.Group
+		for i := 0; i <= rateLimit; i++ {
+			group2.Go(func() error {
+				return ul.CreateBucket(ctx, satellite, testrand.BucketName())
+			})
+		}
+		group2Errs := group2.Wait()
+		require.Len(t, group2Errs, 1)
+	})
+}
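
The reworked tests fire the requests concurrently: each test issues rateLimit+1 bucket creations at once and asserts on the number of failures collected by errs2.Group (exactly one when limiting is active, zero when disabled), which relies on the limiter admitting a burst of rateLimit immediate requests. The cache-expiration test then lowers the project override to 1, sleeps 200ms (longer than the 100ms CacheExpiration it configured), and expects the next burst to be judged against the freshly re-read limit. The following is a self-contained sketch of that counting pattern against a plain golang.org/x/time/rate limiter, not the satellite's test helpers:

    package sketch

    import (
    	"sync"

    	"golang.org/x/time/rate"
    )

    // rejectedOf fires n requests at the limiter concurrently and reports how
    // many were turned away. With a limiter allowing a burst of N immediate
    // requests and n == N+1, exactly one request is rejected, mirroring the
    // require.Len(groupErrs, 1) assertions in the tests above.
    func rejectedOf(limiter *rate.Limiter, n int) int {
    	var (
    		mu       sync.Mutex
    		rejected int
    		wg       sync.WaitGroup
    	)
    	for i := 0; i < n; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			if !limiter.Allow() {
    				mu.Lock()
    				rejected++
    				mu.Unlock()
    			}
    		}()
    	}
    	wg.Wait()
    	return rejected
    }

For example, rejectedOf(rate.NewLimiter(2, 2), 3) returns 1.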

@@ -250,6 +250,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # number of projects to cache.
 # metainfo.rate-limiter.cache-capacity: 10000
 
+# how long to cache the projects limiter.
+# metainfo.rate-limiter.cache-expiration: 10m0s
+
 # whether rate limiting is enabled.
 # metainfo.rate-limiter.enabled: true
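
One small note on the generated value: the struct tag default added above is written "10m", while the lock file records "10m0s". Both parse to the same ten-minute duration; "10m0s" is just time.Duration's canonical String() rendering, as this short check shows:

    package sketch

    import (
    	"fmt"
    	"time"
    )

    // printExpirationDefault shows that "10m" and "10m0s" denote the same duration.
    func printExpirationDefault() {
    	d, err := time.ParseDuration("10m")
    	fmt.Println(d.String(), err)     // 10m0s <nil>
    	fmt.Println(d == 10*time.Minute) // true
    }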