// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package web

import (
	"context"
	"net"
	"net/http"
	"strings"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

// RateLimiterConfig configures a RateLimiter.
type RateLimiterConfig struct {
	Duration  time.Duration `help:"the rate at which requests are allowed" default:"5m"`
	Burst     int           `help:"number of events before the limit kicks in" default:"5" testDefault:"3"`
	NumLimits int           `help:"number of clients whose rate limits we store" default:"1000" testDefault:"10"`
}
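
// Note: Duration plays three roles below: the refill interval of each
// per-key limiter, the idle time after which a stored entry expires, and
// the period of the cleanup ticker in Run.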

// RateLimiter imposes a rate limit per key.
type RateLimiter struct {
	config  RateLimiterConfig
	mu      sync.Mutex
	limits  map[string]*userLimit
	keyFunc func(*http.Request) (string, error)
}

// userLimit is the per-key limiter.
type userLimit struct {
	limiter  *rate.Limiter
	lastSeen time.Time
}

// NewIPRateLimiter constructs a RateLimiter that limits based on IP address.
func NewIPRateLimiter(config RateLimiterConfig) *RateLimiter {
	return NewRateLimiter(config, GetRequestIP)
}

// NewRateLimiter constructs a RateLimiter.
func NewRateLimiter(config RateLimiterConfig, keyFunc func(*http.Request) (string, error)) *RateLimiter {
	return &RateLimiter{
		config:  config,
		limits:  make(map[string]*userLimit),
		keyFunc: keyFunc,
	}
}
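
// A minimal usage sketch, not part of this package's API: "handler" and
// "ctx" are assumed to exist in the caller's scope.
//
//	limiter := NewIPRateLimiter(RateLimiterConfig{
//		Duration:  5 * time.Minute,
//		Burst:     5,
//		NumLimits: 1000,
//	})
//	go limiter.Run(ctx)
//	_ = http.ListenAndServe(":8080", limiter.Limit(handler))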

// Run occasionally cleans old rate-limiting data, until context cancel.
func (rl *RateLimiter) Run(ctx context.Context) {
	cleanupTicker := time.NewTicker(rl.config.Duration)
	defer cleanupTicker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-cleanupTicker.C:
			rl.cleanupLimiters()
		}
	}
}

// cleanupLimiters removes old rate limits to free memory.
func (rl *RateLimiter) cleanupLimiters() {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	for k, v := range rl.limits {
		if time.Since(v.lastSeen) > rl.config.Duration {
			delete(rl.limits, k)
		}
	}
}

// Limit applies per-key rate limiting as an HTTP Handler.
func (rl *RateLimiter) Limit(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		key, err := rl.keyFunc(r)
		if err != nil {
			http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
			return
		}

		limit := rl.getUserLimit(key)
		if !limit.Allow() {
			http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests)
			return
		}

		next.ServeHTTP(w, r)
	})
}

// GetRequestIP gets the original IP address of the request by handling the request headers.
func GetRequestIP(r *http.Request) (ip string, err error) {
	realIP := r.Header.Get("X-REAL-IP")
	if realIP != "" {
		return realIP, nil
	}

	forwardedIPs := r.Header.Get("X-FORWARDED-FOR")
	if forwardedIPs != "" {
		ips := strings.Split(forwardedIPs, ", ")
		if len(ips) > 0 {
			return ips[0], nil
		}
	}

	ip, _, err = net.SplitHostPort(r.RemoteAddr)

	return ip, err
}
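
// Illustrative behavior (example values only): a request carrying
// "X-Forwarded-For: 203.0.113.7, 198.51.100.2" and no X-Real-IP resolves
// to "203.0.113.7"; with neither header set, the IP is taken from
// r.RemoteAddr. Both headers are set by whatever sits in front of this
// server, so GetRequestIP reports whatever that proxy (or client) sent.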

// getUserLimit returns a rate limiter for a key.
func (rl *RateLimiter) getUserLimit(key string) *rate.Limiter {
	rl.mu.Lock()
	defer rl.mu.Unlock()

	v, exists := rl.limits[key]
	if !exists {
		if len(rl.limits) >= rl.config.NumLimits {
			// Tracking only NumLimits keys prevents an out-of-memory DoS attack.
			// Returning StatusTooManyRequests would be just as bad.
			// The least-bad option may be to remove the oldest key.
			oldestKey := ""
			var oldestTime *time.Time
			for key, v := range rl.limits {
				// While we're looping, prefer to delete expired records outright.
				if time.Since(v.lastSeen) > rl.config.Duration {
					delete(rl.limits, key)
				}
				// But be prepared to delete the oldest non-expired record.
				if oldestTime == nil || v.lastSeen.Before(*oldestTime) {
					oldestTime = &v.lastSeen
					oldestKey = key
				}
			}
			// Only delete the oldest non-expired key if there's still an issue.
			if oldestKey != "" && len(rl.limits) >= rl.config.NumLimits {
				delete(rl.limits, oldestKey)
			}
		}
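		// rate.Limit(time.Second)/rate.Limit(rl.config.Duration) works out to
		// 1/(Duration in seconds) events per second, i.e. one event per
		// Duration, equivalent to rate.Every(rl.config.Duration).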
		limiter := rate.NewLimiter(rate.Limit(time.Second)/rate.Limit(rl.config.Duration), rl.config.Burst)
		rl.limits[key] = &userLimit{limiter, time.Now()}
		return limiter
	}
	v.lastSeen = time.Now()
	return v.limiter
}

// Burst returns the number of events that happen before the rate limit.
func (rl *RateLimiter) Burst() int {
	return rl.config.Burst
}

// Duration returns the amount of time required between events.
func (rl *RateLimiter) Duration() time.Duration {
	return rl.config.Duration
}