cleanup and remove no longer needed hash store cache, prefer using paged reporting when needed. fixed #534
danmayer committed Jun 19, 2024
1 parent cbf22cb commit c25cc2c
Showing 3 changed files with 20 additions and 162 deletions.
README.md (5 changes: 0 additions & 5 deletions)
@@ -299,11 +299,6 @@ Coverband on very high volume sites with many server processes reporting can hav

See more discussion [here](https://github.com/danmayer/coverband/issues/384).

Please note that with the Redis Hash Store, every time you load the full report, Coverband executes `HGETALL` queries against your Redis server twice for every file in the project (once for runtime coverage and once for eager loading coverage). This shouldn't have a big impact on small to medium projects, but it can become a real burden if your project has a few thousand files.
To help reduce the extra Redis load when fetching the coverage report, you can enable `get_coverage_cache` (but note that with it enabled you will always get a previous version of the report while the cache is re-populated with a newer one).

- Use Hash Redis Store with _get coverage cache_: `config.store = Coverband::Adapters::HashRedisStore.new(redis, get_coverage_cache: true)`
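
For context, a fuller version of that one-liner might look like the sketch below; the `Coverband.configure` block and the `REDIS_URL`-based connection are assumptions for illustration, while `get_coverage_cache: true` and `redis_namespace:` come from this repository:

```ruby
# config/coverband.rb — hypothetical initializer; connection details are assumptions
require "redis"
require "coverband"

Coverband.configure do |config|
  redis = Redis.new(url: ENV.fetch("REDIS_URL", "redis://localhost:6379"))
  # Hash Redis store with the get-coverage cache enabled; reports may lag one
  # refresh behind while the cache is re-populated in the background.
  config.store = Coverband::Adapters::HashRedisStore.new(
    redis,
    redis_namespace: "coverband",
    get_coverage_cache: true
  )
end
```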

### Clear Coverage

Now that Coverband uses MD5 hashes there should be no reason to manually clear coverage unless one is testing, changing versions, or possibly debugging Coverband itself.
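If you do need to clear coverage programmatically, here is a minimal sketch using the adapter's `clear!` and `clear_file!` methods (both visible in the diff below); obtaining the store through `Coverband.configuration` is an assumption:

```ruby
# Hypothetical console or rake usage; how you obtain the store is an assumption.
store = Coverband.configuration.store

store.clear_file!("app/models/dog.rb") # drop stored coverage for a single file
store.clear!                           # drop all stored coverage data
```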
lib/coverband/adapters/hash_redis_store.rb (126 changes: 20 additions & 106 deletions)
@@ -5,82 +5,6 @@
module Coverband
module Adapters
class HashRedisStore < Base
class GetCoverageNullCacheStore
def self.clear!(*_local_types)
end

def self.fetch(_local_type)
yield(0)
end
end

class GetCoverageRedisCacheStore
LOCK_LIMIT = 60 * 30 # 30 minutes

def initialize(redis, key_prefix)
@redis = redis
@key_prefix = [key_prefix, "get-coverage"].join(".")
end

def fetch(local_type)
cached_result = get(local_type)

# if no cache available, block the call and populate the cache
# if cache is available, return it and start re-populating it (with a lock)
if cached_result.nil?
value = yield(0)
result = set(local_type, JSON.generate(value))
value
else
if lock!(local_type)
Thread.new do
result = yield(deferred_time)
set(local_type, JSON.generate(result))
ensure
unlock!(local_type)
end
end
JSON.parse(cached_result)
end
end

def clear!(local_types = Coverband::TYPES)
Array(local_types).each do |local_type|
del(local_type)
unlock!(local_type)
end
end

private

# sleep in between to avoid holding up other Redis commands,
# with a small random offset so runtime and eager types can be processed "at the same time"
def deferred_time
rand(2.0..3.0)
end

def del(local_type)
@redis.del("#{@key_prefix}.cache.#{local_type}")
end

def get(local_type)
@redis.get("#{@key_prefix}.cache.#{local_type}")
end

def set(local_type, value)
@redis.set("#{@key_prefix}.cache.#{local_type}", value)
end

# lock for at most 30 minutes
def lock!(local_type)
@redis.set("#{@key_prefix}.lock.#{local_type}", "1", nx: true, ex: LOCK_LIMIT)
end

def unlock!(local_type)
@redis.del("#{@key_prefix}.lock.#{local_type}")
end
end

FILE_KEY = "file"
FILE_LENGTH_KEY = "file_length"
META_DATA_KEYS = [DATA_KEY, FIRST_UPDATED_KEY, LAST_UPDATED_KEY, FILE_HASH].freeze
@@ -93,7 +17,7 @@ def unlock!(local_type)

JSON_PAYLOAD_EXPIRATION = 5 * 60

attr_reader :redis_namespace, :get_coverage_cache
attr_reader :redis_namespace

def initialize(redis, opts = {})
super()
@@ -105,13 +29,6 @@ def initialize(redis, opts = {})

@ttl = opts[:ttl]
@relative_file_converter = opts[:relative_file_converter] || Utils::RelativeFileConverter

@get_coverage_cache = if opts[:get_coverage_cache]
key_prefix = [REDIS_STORAGE_FORMAT_VERSION, @redis_namespace].compact.join(".")
GetCoverageRedisCacheStore.new(redis, key_prefix)
else
GetCoverageNullCacheStore
end
end

def supported?
@@ -129,7 +46,6 @@ def clear!
@redis.del(*file_keys) if file_keys.any?
@redis.del(files_key)
@redis.del(files_key(type))
@get_coverage_cache.clear!(type)
end
self.type = old_type
end
@@ -139,7 +55,6 @@ def clear_file!(file)
relative_path_file = @relative_file_converter.convert(file)
Coverband::TYPES.each do |type|
@redis.del(key(relative_path_file, type, file_hash: file_hash))
@get_coverage_cache.clear!(type)
end
@redis.srem(files_key, relative_path_file)
end
@@ -176,30 +91,29 @@ def save_report(report)
# When paging, callers should use coverage_for_types and pull eager and runtime together as matched pairs
def coverage(local_type = nil, opts = {})
page_size = opts[:page_size] || 250
cached_results = @get_coverage_cache.fetch(local_type || type) do |sleep_time|
files_set = if opts[:page]
raise "call coverage_for_types with paging"
elsif opts[:filename]
type_key_prefix = key_prefix(local_type)
# NOTE: a cleaner way to extract the filename from the key would be preferable
files_set(local_type).select do |cache_key|
cache_key.sub(type_key_prefix, "").match(short_name(opts[:filename]))
end || {}
else
files_set(local_type)
end
# below uses batches with a sleep in between to avoid overloading redis
files_set.each_slice(page_size).flat_map do |key_batch|
sleep sleep_time
@redis.pipelined do |pipeline|
key_batch.each do |key|
pipeline.hgetall(key)
end
files_set = if opts[:page]
raise "call coverage_for_types with paging"
elsif opts[:filename]
type_key_prefix = key_prefix(local_type)
# NOTE: a cleaner way to extract the filename from the key would be preferable
files_set(local_type).select do |cache_key|
cache_key.sub(type_key_prefix, "").match(short_name(opts[:filename]))
end || {}
else
files_set(local_type)
end

# below uses batches with a sleep in between to avoid overloading redis
files_set = files_set.each_slice(page_size).flat_map do |key_batch|
sleep(0.01 * rand(1..10))
@redis.pipelined do |pipeline|
key_batch.each do |key|
pipeline.hgetall(key)
end
end
end

cached_results.each_with_object({}) do |data_from_redis, hash|
files_set.each_with_object({}) do |data_from_redis, hash|
add_coverage_for_file(data_from_redis, hash)
end
end
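The `raise "call coverage_for_types with paging"` guard above points large projects at paged reporting instead of the removed cache. A hypothetical sketch of both paths follows — only the `:filename` and `:page_size` options appear in the diff; the `coverage_for_types` signature and its paging options are assumptions:

```ruby
# Hypothetical report calls; coverage_for_types' exact signature is an assumption.
store = Coverband.configuration.store

# Per-file lookup: HGETALLs are pipelined in batches of :page_size keys
dog_coverage = store.coverage(nil, filename: "dog.rb", page_size: 100)

# Full paged report: coverage(page: ...) raises and directs callers here instead
page_one = store.coverage_for_types(Coverband::TYPES, page: 1, page_size: 250)
```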
test/coverband/adapters/hash_redis_store_test.rb (51 changes: 0 additions & 51 deletions)
@@ -192,57 +192,6 @@ def test_clear_file
assert_nil @store.get_coverage_report[:merged]["./dog.rb"]
end

def test_get_coverage_cache
@store = Coverband::Adapters::HashRedisStore.new(
@redis,
redis_namespace: "coverband_test",
relative_file_converter: MockRelativeFileConverter,
get_coverage_cache: true
)
@store.get_coverage_cache.stubs(:deferred_time).returns(0)
@store.get_coverage_cache.clear!
mock_file_hash
yesterday = DateTime.now.prev_day.to_time
mock_time(yesterday)
@store.save_report(
"app_path/dog.rb" => [0, 1, 2]
)
assert_equal(
{
"first_updated_at" => yesterday.to_i,
"last_updated_at" => yesterday.to_i,
"file_hash" => "abcd",
"data" => [0, 1, 2],
"timedata" => [nil, Time.at(yesterday.to_i), Time.at(yesterday.to_i)]
},
@store.coverage["./dog.rb"]
)
@store.save_report(
"app_path/dog.rb" => [0, 1, 2]
)
assert_equal(
{
"first_updated_at" => yesterday.to_i,
"last_updated_at" => yesterday.to_i,
"file_hash" => "abcd",
"data" => [0, 1, 2],
"timedata" => [nil, Time.at(yesterday.to_i), Time.at(yesterday.to_i)]
},
@store.coverage["./dog.rb"]
)
sleep 0.1 # wait for the caching thread to finish
assert_equal(
{
"first_updated_at" => yesterday.to_i,
"last_updated_at" => yesterday.to_i,
"file_hash" => "abcd",
"data" => [0, 2, 4],
"timedata" => [nil, Time.at(yesterday.to_i), Time.at(yesterday.to_i)]
},
@store.coverage["./dog.rb"]
)
end

def test_split_coverage
@store = Coverband::Adapters::HashRedisStore.new(
@redis,
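For reference, the removed test above exercised the cache's "serve stale, refresh in the background" behavior. Below is a standalone sketch of that cache-aside-with-lock pattern, using the same Redis `SET ... NX EX` idiom as the removed `GetCoverageRedisCacheStore`; the class name and key prefix are hypothetical:

```ruby
require "json"
require "redis"

# Minimal sketch of the removed pattern: serve the cached report immediately,
# and refresh it in a background thread guarded by an NX lock.
class StaleWhileRevalidateCache
  LOCK_TTL = 60 * 30 # 30 minutes, matching the removed LOCK_LIMIT

  def initialize(redis, prefix: "example.get-coverage")
    @redis = redis
    @prefix = prefix
  end

  def fetch(type)
    cached = @redis.get("#{@prefix}.cache.#{type}")
    if cached.nil?
      # No cache yet: compute synchronously and store the result.
      fresh = yield
      @redis.set("#{@prefix}.cache.#{type}", JSON.generate(fresh))
      fresh
    else
      # Serve stale data now; at most one caller wins the NX lock and refreshes.
      if @redis.set("#{@prefix}.lock.#{type}", "1", nx: true, ex: LOCK_TTL)
        Thread.new do
          @redis.set("#{@prefix}.cache.#{type}", JSON.generate(yield))
        ensure
          @redis.del("#{@prefix}.lock.#{type}")
        end
      end
      JSON.parse(cached)
    end
  end
end
```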
