Skip to content

Commit

Permalink
Add local cache to store whether it is over the limit (#111)
Browse files Browse the repository at this point in the history
  • Loading branch information
freedomljc authored and mattklein123 committed Dec 20, 2019
1 parent 8d92350 commit 87ed58e
Show file tree
Hide file tree
Showing 14 changed files with 345 additions and 121 deletions.
7 changes: 7 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
- [Request Fields](#request-fields)
- [Statistics](#statistics)
- [Debug Port](#debug-port)
- [Local Cache](#local-cache)
- [Redis](#redis)
- [One Redis Instance](#one-redis-instance)
- [Two Redis Instances](#two-redis-instances)
Expand Down Expand Up @@ -373,6 +374,12 @@ $ curl 0:6070/

You can specify the debug port with the `DEBUG_PORT` environment variable. It defaults to `6070`.

# Local Cache

Ratelimit optionally uses [freecache](https://github.com/coocood/freecache) as its local caching layer, which stores the over-the-limit cache keys and thus avoids querying
Redis again for keys that are already known to be over the limit. The local cache size can be configured via `LocalCacheSizeInBytes` in the [settings](https://github.com/lyft/ratelimit/blob/master/src/settings/settings.go).
If `LocalCacheSizeInBytes` is 0, the local cache is disabled.

# Redis

Ratelimit uses Redis as its caching layer. Ratelimit supports two operation modes:
Expand Down
26 changes: 15 additions & 11 deletions glide.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions glide.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,5 @@ import:
version: v3.7.1
- package: github.com/golang/protobuf/proto
version: v1.3.1
- package: github.com/coocood/freecache
version: v1.1.0
9 changes: 5 additions & 4 deletions src/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ package config
import (
pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
"github.com/lyft/gostats"
stats "github.com/lyft/gostats"
"golang.org/x/net/context"
)

Expand All @@ -21,9 +21,10 @@ func (e RateLimitConfigError) Error() string {

// RateLimitStats holds the stats counters for an individual rate limit
// config entry. OverLimitWithLocalCache counts hits rejected via the local
// over-limit cache without consulting Redis; those hits are also counted
// in OverLimit.
type RateLimitStats struct {
	TotalHits               stats.Counter
	OverLimit               stats.Counter
	NearLimit               stats.Counter
	OverLimitWithLocalCache stats.Counter
}

// Wrapper for an individual rate limit config entry which includes the defined limit and stats.
Expand Down
3 changes: 2 additions & 1 deletion src/config/config_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (

pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
"github.com/lyft/gostats"
stats "github.com/lyft/gostats"
logger "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"gopkg.in/yaml.v2"
Expand Down Expand Up @@ -61,6 +61,7 @@ func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats {
ret.TotalHits = statsScope.NewCounter(key + ".total_hits")
ret.OverLimit = statsScope.NewCounter(key + ".over_limit")
ret.NearLimit = statsScope.NewCounter(key + ".near_limit")
ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache")
return ret
}

Expand Down
43 changes: 42 additions & 1 deletion src/redis/cache_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"sync"
"time"

"github.com/coocood/freecache"
pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
"github.com/lyft/ratelimit/src/assert"
Expand All @@ -28,6 +29,7 @@ type rateLimitCacheImpl struct {
expirationJitterMaxSeconds int64
// bytes.Buffer pool used to efficiently generate cache keys.
bufferPool sync.Pool
localCache *freecache.Cache
}

// Convert a rate limit into a time divider.
Expand Down Expand Up @@ -151,11 +153,24 @@ func (this *rateLimitCacheImpl) DoLimit(
}
}

isOverLimitWithLocalCache := make([]bool, len(request.Descriptors))

// Now, actually setup the pipeline, skipping empty cache keys.
for i, cacheKey := range cacheKeys {
if cacheKey.key == "" {
continue
}

if this.localCache != nil {
// Get returns the value or not found error.
_, err := this.localCache.Get([]byte(cacheKey.key))
if err == nil {
isOverLimitWithLocalCache[i] = true
logger.Debugf("cache key is over the limit: %s", cacheKey.key)
continue
}
}

logger.Debugf("looking up cache key: %s", cacheKey.key)

expirationSeconds := unitToDivider(limits[i].Limit.Unit)
Expand Down Expand Up @@ -185,6 +200,18 @@ func (this *rateLimitCacheImpl) DoLimit(
continue
}

if isOverLimitWithLocalCache[i] {
responseDescriptorStatuses[i] =
&pb.RateLimitResponse_DescriptorStatus{
Code: pb.RateLimitResponse_OVER_LIMIT,
CurrentLimit: limits[i].Limit,
LimitRemaining: 0,
}
limits[i].Stats.OverLimit.Add(uint64(hitsAddend))
limits[i].Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend))
continue
}

var limitAfterIncrease uint32
// Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit.
if this.perSecondPool != nil && cacheKey.perSecond {
Expand Down Expand Up @@ -222,6 +249,19 @@ func (this *rateLimitCacheImpl) DoLimit(
// in the near limit range.
limits[i].Stats.NearLimit.Add(uint64(overLimitThreshold - max(nearLimitThreshold, limitBeforeIncrease)))
}
if this.localCache != nil {
// Set the TTL of the local_cache to be the entire duration.
// Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit
// cache keys in local_cache lose effectiveness.
// For example, if we have an hour limit on all mongo connections, the cache key would be
// similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start
// to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m).
// In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited.
err := this.localCache.Set([]byte(cacheKey.key), []byte{}, int(unitToDivider(limits[i].Limit.Unit)))
if err != nil {
logger.Errorf("Failing to set local cache key: %s", cacheKey.key)
}
}
} else {
responseDescriptorStatuses[i] =
&pb.RateLimitResponse_DescriptorStatus{
Expand All @@ -248,14 +288,15 @@ func (this *rateLimitCacheImpl) DoLimit(
return responseDescriptorStatuses
}

func NewRateLimitCacheImpl(pool Pool, perSecondPool Pool, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) RateLimitCache {
func NewRateLimitCacheImpl(pool Pool, perSecondPool Pool, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) RateLimitCache {
return &rateLimitCacheImpl{
pool: pool,
perSecondPool: perSecondPool,
timeSource: timeSource,
jitterRand: jitterRand,
expirationJitterMaxSeconds: expirationJitterMaxSeconds,
bufferPool: newBufferPool(),
localCache: localCache,
}
}

Expand Down
10 changes: 9 additions & 1 deletion src/service_cmd/runner/runner.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ import (
"net/http"
"time"

"github.com/coocood/freecache"

pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
pb_legacy "github.com/lyft/ratelimit/proto/ratelimit"

Expand Down Expand Up @@ -44,14 +46,20 @@ func Run() {
} else {
otherPool = redis.NewPoolImpl(srv.Scope().Scope("redis_pool"), s.RedisSocketType, s.RedisUrl, s.RedisPoolSize)
}

var localCache *freecache.Cache
if s.LocalCacheSizeInBytes != 0 {
localCache = freecache.NewCache(s.LocalCacheSizeInBytes)
}
service := ratelimit.NewService(
srv.Runtime(),
redis.NewRateLimitCacheImpl(
otherPool,
perSecondPool,
redis.NewTimeSourceImpl(),
rand.New(redis.NewLockedSource(time.Now().Unix())),
s.ExpirationJitterMaxSeconds),
s.ExpirationJitterMaxSeconds,
localCache),
config.NewRateLimitConfigLoaderImpl(),
srv.Scope().Scope("service"))

Expand Down
1 change: 1 addition & 0 deletions src/settings/settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ type Settings struct {
RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""`
RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"`
ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"`
LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"`
}

type Option func(*Settings)
Expand Down
Loading

0 comments on commit 87ed58e

Please sign in to comment.