From 7f344fcc15e5891ba70375e8508d1540372bd69b Mon Sep 17 00:00:00 2001
From: Donal McBreen
Date: Tue, 9 Jan 2024 15:05:12 +0000
Subject: [PATCH] Expire records on a custom queue

Add `expiry_queue` to the cache config. It can be used to specify a
custom queue for SolidCache::ExpiryJob.
---
 README.md                         |  1 +
 lib/solid_cache/cluster/expiry.rb |  7 +++++--
 test/unit/expiry_test.rb          | 28 ++++++++++++++++++++++------
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index dddea76..6411ba2 100644
--- a/README.md
+++ b/README.md
@@ -80,6 +80,7 @@ Solid Cache supports these options in addition to the standard `ActiveSupport::C
 - `error_handler` - a Proc to call to handle any `ActiveRecord::ActiveRecordError`s that are raised (default: log errors as warnings)
 - `expiry_batch_size` - the batch size to use when deleting old records (default: `100`)
 - `expiry_method` - what expiry method to use `thread` or `job` (default: `thread`)
+- `expiry_queue` - which queue to add expiry jobs to (default: `default`)
 - `max_age` - the maximum age of entries in the cache (default: `2.weeks.to_i`). Can be set to `nil`, but this is not recommended unless using `max_entries` to limit the size of the cache.
 - `max_entries` - the maximum number of entries allowed in the cache (default: `nil`, meaning no limit)
 - `cluster` - a Hash of options for the cache database cluster, e.g `{ shards: [:database1, :database2, :database3] }`
diff --git a/lib/solid_cache/cluster/expiry.rb b/lib/solid_cache/cluster/expiry.rb
index 8e5b9b9..ca2b01d 100644
--- a/lib/solid_cache/cluster/expiry.rb
+++ b/lib/solid_cache/cluster/expiry.rb
@@ -9,12 +9,13 @@ module Expiry
       # This ensures there is downward pressure on the cache size while there is valid data to delete
       EXPIRY_MULTIPLIER = 1.25
 
-      attr_reader :expiry_batch_size, :expiry_method, :expire_every, :max_age, :max_entries
+      attr_reader :expiry_batch_size, :expiry_method, :expiry_queue, :expire_every, :max_age, :max_entries
 
       def initialize(options = {})
         super(options)
         @expiry_batch_size = options.fetch(:expiry_batch_size, 100)
         @expiry_method = options.fetch(:expiry_method, :thread)
+        @expiry_queue = options.fetch(:expiry_queue, :default)
         @expire_every = [ (expiry_batch_size / EXPIRY_MULTIPLIER).floor, 1 ].max
         @max_age = options.fetch(:max_age, 2.weeks.to_i)
         @max_entries = options.fetch(:max_entries, nil)
@@ -29,7 +30,9 @@ def track_writes(count)
       private
         def expire_later
           if expiry_method == :job
-            ExpiryJob.perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
+            ExpiryJob
+              .set(queue: expiry_queue)
+              .perform_later(expiry_batch_size, shard: Entry.current_shard, max_age: max_age, max_entries: max_entries)
           else
             async { Entry.expire(expiry_batch_size, max_age: max_age, max_entries: max_entries) }
           end
diff --git a/test/unit/expiry_test.rb b/test/unit/expiry_test.rb
index 3be9495..10bd0b8 100644
--- a/test/unit/expiry_test.rb
+++ b/test/unit/expiry_test.rb
@@ -117,10 +117,26 @@ class SolidCache::ExpiryTest < ActiveSupport::TestCase
     end
   end
 
-  private
-    def shard_keys(cache, shard)
-      namespaced_keys = 100.times.map { |i| @cache.send(:normalize_key, "key#{i}", {}) }
-      shard_keys = cache.primary_cluster.send(:connections).assign(namespaced_keys)[shard]
-      shard_keys.map { |key| key.delete_prefix("#{@namespace}:") }
-    end
+  test "expires old records with a custom queue" do
+    @cache = lookup_store(expiry_batch_size: 3, max_entries: 2, expiry_method: :job, expiry_queue: :cache_expiry)
+
+    default_shard_keys = shard_keys(@cache, :default)
+
+    assert_enqueued_jobs(2, only: SolidCache::ExpiryJob, queue: :cache_expiry) do
+      @cache.write(default_shard_keys[0], 1)
+      @cache.write(default_shard_keys[1], 2)
+      @cache.write(default_shard_keys[2], 3)
+      @cache.write(default_shard_keys[2], 4)
+    end
+
+    perform_enqueued_jobs
+    assert_equal 0, SolidCache.each_shard.sum { SolidCache::Entry.count }
+  end
+
+  private
+    def shard_keys(cache, shard)
+      namespaced_keys = 100.times.map { |i| @cache.send(:normalize_key, "key#{i}", {}) }
+      shard_keys = cache.primary_cluster.send(:connections).assign(namespaced_keys)[shard]
+      shard_keys.map { |key| key.delete_prefix("#{@namespace}:") }
+    end
 end
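
Usage note (not part of the patch): a minimal sketch of how the new option might be enabled, assuming the store is configured through `config.cache_store` in an environment file. The `:cache_expiry` queue name is illustrative (it mirrors the test above), and expiry jobs are only enqueued when `expiry_method` is set to `:job`.

    # config/environments/production.rb (illustrative)
    # Run cache expiry through Active Job on a dedicated queue instead of the
    # default background thread. Option names match the README entries above;
    # the queue name is an example, not a requirement.
    config.cache_store = :solid_cache_store, {
      expiry_method: :job,
      expiry_queue:  :cache_expiry
    }

Whichever Active Job backend is in use still needs a worker consuming the chosen queue; otherwise the enqueued SolidCache::ExpiryJob instances sit unprocessed and old entries are never deleted.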