From 4a0a16b12909c880ef19b178e7f54ddf9c3b8e99 Mon Sep 17 00:00:00 2001 From: Thomas Korrison Date: Fri, 9 Jan 2026 15:25:15 +0000 Subject: [PATCH 1/4] Feature: Add metrics to FIFOCache. - Refactored FIFOCache with private inner pure cache logic - InstrumentedFifoCache only public interface - FifoMetrics separate and private --- benches/cache_benchmarks.rs | 12 +- benches/fifo_cache_benchmarking.rs | 45 +-- benches/fifo_complexity_benchmarks.rs | 40 +- .../disk/async_disk/cache/cache_manager.rs | 44 +-- src/storage/disk/async_disk/cache/fifo.rs | 371 +++++++++++++----- tests/storage/fifo_concurrency.rs | 46 ++- 6 files changed, 369 insertions(+), 189 deletions(-) diff --git a/benches/cache_benchmarks.rs b/benches/cache_benchmarks.rs index 9f8cce16..c5634013 100644 --- a/benches/cache_benchmarks.rs +++ b/benches/cache_benchmarks.rs @@ -1,6 +1,6 @@ use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use ferrite::storage::disk::async_disk::cache::cache_traits::CoreCache; -use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; +use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; use std::hint::black_box; fn benchmark_fifo_cache_operations(c: &mut Criterion) { @@ -17,7 +17,7 @@ fn benchmark_fifo_cache_operations(c: &mut Criterion) { &size, |b, &size| { // Setup - let mut cache = FIFOCache::new(size); + let mut cache = InstrumentedFifoCache::new(size); // Fill cache to 80% capacity let fill_count = (size as f64 * 0.8) as usize; @@ -58,7 +58,7 @@ fn benchmark_fifo_eviction_complexity(c: &mut Criterion) { b.iter_batched( // Setup for each iteration || { - let mut cache = FIFOCache::new(size); + let mut cache = InstrumentedFifoCache::new(size); // Fill cache to capacity for i in 0..size { @@ -103,7 +103,7 @@ fn benchmark_cache_comparison(c: &mut Criterion) { // Compare different cache operations group.bench_function("fifo_insert", |b| { b.iter_batched( - || FIFOCache::new(size), + || InstrumentedFifoCache::new(size), 
|mut cache| { for i in 0..100 { cache.insert( @@ -118,7 +118,7 @@ fn benchmark_cache_comparison(c: &mut Criterion) { }); group.bench_function("fifo_get_hit", |b| { - let mut cache = FIFOCache::new(size); + let mut cache = InstrumentedFifoCache::new(size); for i in 0..100 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -131,7 +131,7 @@ fn benchmark_cache_comparison(c: &mut Criterion) { }); group.bench_function("fifo_get_miss", |b| { - let mut cache = FIFOCache::new(size); + let mut cache = InstrumentedFifoCache::new(size); for i in 0..100 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } diff --git a/benches/fifo_cache_benchmarking.rs b/benches/fifo_cache_benchmarking.rs index d7d7f8c4..91d1436c 100644 --- a/benches/fifo_cache_benchmarking.rs +++ b/benches/fifo_cache_benchmarking.rs @@ -1,3 +1,4 @@ +use std::fmt::Debug; use std::fs::{OpenOptions, create_dir_all}; use std::hash::Hash; use std::hint::black_box; @@ -11,7 +12,7 @@ use criterion::{ criterion_group, criterion_main, }; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; -use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; +use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; // ================================================================================= // TIME COMPLEXITY BENCHMARKS @@ -53,7 +54,7 @@ fn benchmark_insert_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); // Pre-fill to capacity-1 to avoid eviction effects for i in 0..(cache_size - 1) { cache.insert(i, i); @@ -119,7 +120,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { // Calibrate repeat count using a temporary cache so each measured sample aggregates // enough work to reach ~target_sample duration. 
- let mut tmp_cache = FIFOCache::new(cache_size); + let mut tmp_cache = InstrumentedFifoCache::new(cache_size); prefill_cache( &mut tmp_cache, &keys, @@ -142,7 +143,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { b.iter_batched( || { // Fresh cache per sample, setup not timed - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); prefill_cache( &mut cache, &keys, @@ -183,7 +184,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { let values = pregen_values(total_ops.max(working_set)); let ops = build_random_ops(total_ops, working_set, rnd_insert_every); - let mut tmp_cache = FIFOCache::new(cache_size); + let mut tmp_cache = InstrumentedFifoCache::new(cache_size); prefill_cache( &mut tmp_cache, &keys, @@ -204,7 +205,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { |b, &(cache_size, _)| { b.iter_batched( || { - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); prefill_cache( &mut cache, &keys, @@ -241,7 +242,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { // Heavy eviction - small cache, large key space group.bench_function("heavy_eviction", |b| { b.iter_batched( - || FIFOCache::new(500), + || InstrumentedFifoCache::new(500), |mut cache| { // 10x more keys than capacity - forces constant eviction for i in 0..15000 { @@ -263,7 +264,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { group.bench_function("light_eviction", |b| { b.iter_batched( || { - let mut cache = FIFOCache::new(2000); + let mut cache = InstrumentedFifoCache::new(2000); // Pre-populate with working set for i in 0..1500 { cache.insert(format!("stable_{}", i), format!("data_{}", i)); @@ -311,7 +312,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { // peek_oldest performance group.bench_function("peek_oldest", |b| { - let mut cache = FIFOCache::new(1000); + let mut cache = InstrumentedFifoCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), 
format!("value_{}", i)); } @@ -323,7 +324,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { group.bench_function("pop_oldest", |b| { b.iter_batched( || { - let mut cache = FIFOCache::new(1000); + let mut cache = InstrumentedFifoCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -342,7 +343,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { |b, &batch_size| { b.iter_batched( || { - let mut cache = FIFOCache::new(1000); + let mut cache = InstrumentedFifoCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -369,7 +370,7 @@ fn benchmark_fifo_micro_ops(c: &mut Criterion) { // contains for &cap in &[128usize, 1024, 4096] { group.bench_with_input(BenchmarkId::new("contains", cap), &cap, |b, &cap| { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -385,7 +386,7 @@ fn benchmark_fifo_micro_ops(c: &mut Criterion) { group.bench_with_input(BenchmarkId::new("clear", cap), &cap, |b, &cap| { b.iter_batched( || { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -403,7 +404,7 @@ fn benchmark_fifo_micro_ops(c: &mut Criterion) { // age_rank for &cap in &[256usize, 1024, 4096] { group.bench_with_input(BenchmarkId::new("age_rank", cap), &cap, |b, &cap| { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -439,7 +440,7 @@ fn benchmark_stale_impact(c: &mut Criterion) { group.bench_with_input(id, &(cap, factor), |b, &(cap, factor)| { b.iter_batched( || { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); // Fill to capacity for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); @@ -460,7 +461,7 @@ fn 
benchmark_stale_impact(c: &mut Criterion) { group.bench_with_input(id, &(cap, factor), |b, &(cap, factor)| { b.iter_batched( || { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -482,7 +483,7 @@ fn benchmark_stale_impact(c: &mut Criterion) { group.bench_with_input(id, &(cap, factor), |b, &(cap, factor)| { b.iter_batched( || { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -518,7 +519,7 @@ fn benchmark_space_usage(c: &mut Criterion) { group.bench_with_input(BenchmarkId::from_parameter(cap), &cap, |b, &cap| { b.iter_batched( || { - let mut cache = FIFOCache::new(cap); + let mut cache = InstrumentedFifoCache::new(cap); // Fill beyond capacity to create stale entries as well for i in 0..(cap * 3) { cache.insert(format!("s{i}"), format!("vs{i}")); @@ -636,8 +637,8 @@ fn build_random_ops(total_ops: usize, working_set: usize, insert_every: usize) - ops } -fn prefill_cache( - cache: &mut FIFOCache, +fn prefill_cache( + cache: &mut InstrumentedFifoCache, keys: &[K], values: &[V], prefill: usize, @@ -648,8 +649,8 @@ fn prefill_cache( } } -fn run_ops_once( - cache: &mut FIFOCache, +fn run_ops_once( + cache: &mut InstrumentedFifoCache, keys: &[K], ops: &[Op], values: &[V], diff --git a/benches/fifo_complexity_benchmarks.rs b/benches/fifo_complexity_benchmarks.rs index 96d8992b..5003c578 100644 --- a/benches/fifo_complexity_benchmarks.rs +++ b/benches/fifo_complexity_benchmarks.rs @@ -3,7 +3,7 @@ use criterion::{ criterion_group, criterion_main, }; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; -use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; +use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; use std::hint::black_box; fn generate_cache_sizes() -> Vec { @@ -46,7 +46,7 @@ 
fn benchmark_insert_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); // Pre-fill to capacity-1 to avoid eviction effects for i in 0..(cache_size - 1) { cache.insert(i, i); @@ -82,7 +82,7 @@ fn benchmark_get_time_complexity(c: &mut Criterion) { &cache_size, |b, &cache_size| { // Setup: Create cache filled with data - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -114,7 +114,7 @@ fn benchmark_contains_time_complexity(c: &mut Criterion) { BenchmarkId::new("contains_operations", cache_size), &cache_size, |b, &cache_size| { - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -145,7 +145,7 @@ fn benchmark_eviction_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); // Fill to capacity for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); @@ -182,7 +182,7 @@ fn benchmark_eviction_time_complexity(c: &mut Criterion) { cache.insert(key, value); black_box(cache) }, - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ); }, ); @@ -201,7 +201,7 @@ fn benchmark_age_rank_time_complexity(c: &mut Criterion) { BenchmarkId::new("age_rank_operations", cache_size), &cache_size, |b, &cache_size| { - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -231,7 +231,7 @@ fn benchmark_clear_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut 
cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); // Fill cache with data for i in 0..cache_size { cache.insert( @@ -271,7 +271,7 @@ fn benchmark_memory_usage_patterns(c: &mut Criterion) { b.iter_batched( || { // Create empty cache - FIFOCache::::new(cache_size) + InstrumentedFifoCache::::new(cache_size) }, |mut cache| { // Fill cache to capacity and measure allocation behavior @@ -305,7 +305,7 @@ fn benchmark_memory_pressure_scenarios(c: &mut Criterion) { &value_size, |b, &value_size| { b.iter_batched( - || FIFOCache::::new(1000), + || InstrumentedFifoCache::::new(1000), |mut cache| { let large_value = "x".repeat(value_size); // Fill cache with large values to create memory pressure @@ -337,7 +337,7 @@ fn benchmark_realistic_workloads(c: &mut Criterion) { // Small cache performance group.bench_function("small_cache_mixed_workload", |b| { b.iter_batched( - || FIFOCache::new(100), + || InstrumentedFifoCache::new(100), |mut cache| { let mut hits = 0; // Mixed workload: 33% inserts, 67% gets @@ -360,7 +360,7 @@ fn benchmark_realistic_workloads(c: &mut Criterion) { // Medium cache performance group.bench_function("medium_cache_mixed_workload", |b| { b.iter_batched( - || FIFOCache::new(1000), + || InstrumentedFifoCache::new(1000), |mut cache| { let mut hits = 0; // Complex workload: 40% inserts, 60% gets @@ -383,7 +383,7 @@ fn benchmark_realistic_workloads(c: &mut Criterion) { // Large cache performance group.bench_function("large_cache_batch_workload", |b| { b.iter_batched( - || FIFOCache::new(10000), + || InstrumentedFifoCache::new(10000), |mut cache| { let mut hits = 0; // Batch workload @@ -462,7 +462,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { b.iter_batched( || { // Fresh cache per measurement - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); // Prefill to ~80% to simulate realistic hit/miss behavior let prefill = (cache_size as f64 * 0.8) as usize; for (i, 
_item) in keys.iter().enumerate().take(prefill.min(working_set)) @@ -519,7 +519,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { b.iter_batched( || { // Fresh cache per measurement - let mut cache = FIFOCache::new(cache_size); + let mut cache = InstrumentedFifoCache::new(cache_size); // Prefill to ~60% for random to bias toward more misses let prefill = (cache_size as f64 * 0.6) as usize; for (i, _item) in keys.iter().enumerate().take(prefill.min(working_set)) @@ -561,7 +561,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { // Heavy eviction - small cache, large key space group.bench_function("heavy_eviction", |b| { b.iter_batched( - || FIFOCache::new(500), + || InstrumentedFifoCache::new(500), |mut cache| { // 10x more keys than capacity - forces constant eviction for i in 0..15000 { @@ -585,7 +585,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { group.bench_function("light_eviction", |b| { b.iter_batched( || { - let mut cache = FIFOCache::new(2000); + let mut cache = InstrumentedFifoCache::new(2000); // Pre-populate with working set for i in 0..1500 { cache.insert(format!("stable_{}", i), format!("data_{}", i)); @@ -633,7 +633,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { // peek_oldest performance group.bench_function("peek_oldest", |b| { - let mut cache = FIFOCache::new(1000); + let mut cache = InstrumentedFifoCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -645,7 +645,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { group.bench_function("pop_oldest", |b| { b.iter_batched( || { - let mut cache = FIFOCache::new(1000); + let mut cache = InstrumentedFifoCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -664,7 +664,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { |b, &batch_size| { b.iter_batched( || { - let mut cache = FIFOCache::new(1000); + let mut cache = InstrumentedFifoCache::new(1000); for i in 0..1000 { 
cache.insert(format!("key_{}", i), format!("value_{}", i)); } diff --git a/src/storage/disk/async_disk/cache/cache_manager.rs b/src/storage/disk/async_disk/cache/cache_manager.rs index 46699b7b..c32d80a7 100644 --- a/src/storage/disk/async_disk/cache/cache_manager.rs +++ b/src/storage/disk/async_disk/cache/cache_manager.rs @@ -121,7 +121,7 @@ //! | `CacheManager` | Central cache coordinator with three tiers | //! | `LRUKCache` | Hot cache with LRU-K (K=2) eviction | //! | `LFUCache` | Warm cache with frequency-based eviction | -//! | `FIFOCache` | Cold cache with FIFO eviction | +//! | `InstrumentedFifoCache` | Cold cache with FIFO eviction | //! | `AdmissionController` | Regulates cache entry based on memory pressure | //! | `PrefetchEngine` | Sequential and pattern-based prefetch prediction | //! | `DeduplicationEngine` | Identifies duplicate page content | @@ -237,7 +237,7 @@ use std::time::Instant; use tokio::sync::RwLock; -use super::fifo::FIFOCache; +use super::fifo::InstrumentedFifoCache; use super::lfu::LFUCache; use super::lru_k::LRUKCache; use crate::common::config::{DB_PAGE_SIZE, PageId}; @@ -818,7 +818,7 @@ pub struct CacheManager { /// L3 Cold Cache: FIFO based cache for newly inserted pages. /// Low overhead, ideal for one-time sequential scans. - cold_cache: Arc>>, + cold_cache: Arc>>, /// Configured capacity for hot cache (number of entries). 
hot_cache_size: usize, @@ -911,7 +911,7 @@ impl CacheManager { // Use LRU-K cache for hot cache (L1) let hot_cache = Arc::new(RwLock::new(LRUKCache::with_k(hot_cache_size, 2))); let warm_cache = Arc::new(RwLock::new(LFUCache::new(warm_cache_size))); - let cold_cache = Arc::new(RwLock::new(FIFOCache::new(cold_cache_size))); + let cold_cache = Arc::new(RwLock::new(InstrumentedFifoCache::new(cold_cache_size))); let prefetch_engine = Arc::new(RwLock::new(PrefetchEngine { access_patterns: HashMap::new(), @@ -1069,11 +1069,10 @@ impl CacheManager { // Check L3 cold cache (FIFO) if let Ok(mut cold_cache) = self.cold_cache.try_write() - && let Some(page_data) = - as CoreCache>::get( - &mut cold_cache, - &page_id, - ) + && let Some(page_data) = as CoreCache< + PageId, + PageData, + >>::get(&mut cold_cache, &page_id) { // Promote to warm cache on hit - clone the PageData (cheap with Arc) self.cache_hits.fetch_add(1, Ordering::Relaxed); @@ -1279,7 +1278,7 @@ impl CacheManager { temperature: temperature.clone(), }; if let Ok(mut cache) = self.cold_cache.try_write() { - as CoreCache>::insert( + as CoreCache>::insert( &mut cache, page_id, page_data, ); } @@ -1515,7 +1514,7 @@ impl CacheManager { }; let cold_used = if let Ok(cache) = self.cold_cache.try_read() { - as CoreCache>::len(&cache) + as CoreCache>::len(&cache) } else { 0 }; @@ -1585,14 +1584,16 @@ impl CacheManager { // High memory pressure - reduce cold cache size if let Ok(mut cold_cache) = self.cold_cache.try_write() { let target_size = self.cold_cache_size / 2; - let current_size = - as CoreCache>::len(&cold_cache); + let current_size = as CoreCache< + PageId, + PageData, + >>::len(&cold_cache); if current_size > target_size { // For FIFO cache, we can't easily evict specific items // Instead, we'll clear part of the cache if current_size > target_size * 2 { let evicted_count = current_size as u64; - as CoreCache>::clear( + as CoreCache>::clear( &mut cold_cache, ); self.demotion_count @@ -1669,7 +1670,7 @@ impl 
CacheManager { }; let cold_used = if let Ok(cache) = self.cold_cache.try_read() { - as CoreCache>::len(&cache) + as CoreCache>::len(&cache) } else { 0 }; @@ -1966,7 +1967,7 @@ impl CacheManager { pub fn perform_specialized_maintenance(&self) { // Use FIFO-specific operations for cold cache if let Ok(cold_cache) = self.cold_cache.try_read() - && let Some((oldest_key, _)) = as FIFOCacheTrait< + && let Some((oldest_key, _)) = as FIFOCacheTrait< PageId, PageData, >>::peek_oldest(&cold_cache) @@ -2095,15 +2096,14 @@ impl CacheManager { if let Ok(cold_cache) = self.cold_cache.try_read() { // Check if page is in cold cache and get FIFO specific details - if as CoreCache>::contains( + if as CoreCache>::contains( &cold_cache, &page_id, ) { - let age_rank = - as FIFOCacheTrait>::age_rank( - &cold_cache, - &page_id, - ); + let age_rank = as FIFOCacheTrait< + PageId, + PageData, + >>::age_rank(&cold_cache, &page_id); return Some(PageAccessDetails { cache_level: "Cold".to_string(), diff --git a/src/storage/disk/async_disk/cache/fifo.rs b/src/storage/disk/async_disk/cache/fifo.rs index 26d229f0..51169748 100644 --- a/src/storage/disk/async_disk/cache/fifo.rs +++ b/src/storage/disk/async_disk/cache/fifo.rs @@ -271,7 +271,9 @@ //! - **Update Semantics**: Updating existing key preserves insertion position //! - **Zero Capacity**: Supported - rejects all insertions -use std::collections::{HashMap, VecDeque}; +use std::cell::Cell; +use std::collections::{HashMap, VecDeque, hash_map}; +use std::fmt::Debug; use std::hash::Hash; use std::sync::Arc; @@ -282,7 +284,16 @@ use crate::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCache /// Evicts the oldest (first inserted) item when capacity is reached. /// See module-level documentation for details. 
#[derive(Debug)] -pub struct FIFOCache +pub struct InstrumentedFifoCache +where + K: Eq + Hash, +{ + inner: FIFOCacheInner, + metrics: FifoMetrics, +} + +#[derive(Debug)] +pub struct FIFOCacheInner where K: Eq + Hash, { @@ -291,41 +302,193 @@ where insertion_order: VecDeque>, // Tracks the order of insertion } -impl FIFOCache +#[derive(Debug, Default, Clone, Copy)] +pub struct FifoMetricsSnapshot { + pub get_calls: u64, + pub get_hits: u64, + pub get_misses: u64, + + pub insert_calls: u64, + pub insert_updates: u64, + pub insert_new: u64, + + pub evict_calls: u64, + pub evicted_entries: u64, + pub stale_skips: u64, // queue entries popped that were already removed from map + pub evict_scan_steps: u64, // how many pop_front iterations inside eviction + + pub pop_oldest_calls: u64, + pub pop_oldest_found: u64, + pub pop_oldest_empty_or_stale: u64, + + pub peek_oldest_calls: u64, + pub peek_oldest_found: u64, + + pub age_rank_calls: u64, + pub age_rank_found: u64, + pub age_rank_scan_steps: u64, + + // gauges captured at snapshot time + pub cache_len: usize, + pub insertion_order_len: usize, + pub capacity: usize, +} + +#[derive(Debug)] +struct FifoMetrics { + get_calls: u64, + get_hits: u64, + get_misses: u64, + insert_calls: u64, + insert_updates: u64, + insert_new: u64, + evict_calls: u64, + evicted_entries: u64, + stale_skips: u64, + evict_scan_steps: u64, + pop_oldest_calls: u64, + pop_oldest_found: u64, + pop_oldest_empty_or_stale: u64, + peek_oldest_calls: MetricsCell, + peek_oldest_found: MetricsCell, + age_rank_calls: MetricsCell, + age_rank_scan_steps: MetricsCell, + age_rank_found: MetricsCell, +} + +impl FifoMetrics { + fn new() -> FifoMetrics { + Self { + get_calls: 0, + get_hits: 0, + get_misses: 0, + insert_calls: 0, + insert_updates: 0, + insert_new: 0, + evict_calls: 0, + evicted_entries: 0, + stale_skips: 0, + evict_scan_steps: 0, + pop_oldest_calls: 0, + pop_oldest_found: 0, + pop_oldest_empty_or_stale: 0, + peek_oldest_calls: MetricsCell::new(), + 
peek_oldest_found: MetricsCell::new(), + age_rank_calls: MetricsCell::new(), + age_rank_scan_steps: MetricsCell::new(), + age_rank_found: MetricsCell::new(), + } + } +} + +/// A metrics-only cell. +/// +/// # Safety +/// This type is only safe if all accesses are externally synchronized. +/// In this system, it is protected by an RwLock at a higher level. +#[repr(transparent)] +#[derive(Debug)] +struct MetricsCell(Cell); + +impl MetricsCell { + #[inline] + fn new() -> Self { + Self(Cell::new(0)) + } + + #[inline] + fn get(&self) -> u64 { + self.0.get() + } + + #[inline] + fn incr(&self) { + self.0.set(self.0.get() + 1); + } +} + +// SAFETY: +// All access to MetricsCell is externally synchronized by an RwLock. +// Metrics are observational and do not affect correctness. +unsafe impl Sync for MetricsCell {} +unsafe impl Send for MetricsCell {} + +impl FIFOCacheInner where K: Eq + Hash, { - /// Creates a new FIFO cache with the given capacity - pub fn new(capacity: usize) -> Self { - FIFOCache { + fn new(capacity: usize) -> Self { + Self { capacity, cache: HashMap::with_capacity(capacity), insertion_order: VecDeque::with_capacity(capacity), } } +} + +impl InstrumentedFifoCache +where + K: Eq + Hash, + V: Debug, +{ + /// Creates a new FIFO cache with the given capacity + pub fn new(capacity: usize) -> Self { + Self { + inner: FIFOCacheInner::new(capacity), + metrics: FifoMetrics::new(), + } + } /// Returns the number of items currently in the cache. /// This is a duplicate of the CoreCache::len() method but provides direct access. pub fn current_size(&self) -> usize { - self.cache.len() + self.inner.cache.len() } /// Returns the insertion order length (may include stale entries). /// This is primarily for testing and debugging purposes. pub fn insertion_order_len(&self) -> usize { - self.insertion_order.len() + self.inner.insertion_order.len() } /// Checks if the internal cache HashMap contains a specific `Arc`. /// This is primarily for testing stale entry behavior. 
pub fn cache_contains_key(&self, key: &Arc) -> bool { - self.cache.contains_key(key) + self.inner.cache.contains_key(key) } /// Returns an iterator over the insertion order keys. /// This is primarily for testing and debugging purposes. pub fn insertion_order_iter(&self) -> impl Iterator> { - self.insertion_order.iter() + self.inner.insertion_order.iter() + } + + /// Returns snapshot metrics from the cache + /// Can be used to help understand the state of the cache + pub fn metrics_snapshot(&self) -> FifoMetricsSnapshot { + FifoMetricsSnapshot { + get_calls: self.metrics.get_calls, + get_hits: self.metrics.get_hits, + get_misses: self.metrics.get_misses, + insert_calls: self.metrics.insert_calls, + insert_updates: self.metrics.insert_updates, + insert_new: self.metrics.insert_new, + evict_calls: self.metrics.evict_calls, + evicted_entries: self.metrics.evicted_entries, + stale_skips: self.metrics.stale_skips, + evict_scan_steps: self.metrics.evict_scan_steps, + pop_oldest_calls: self.metrics.pop_oldest_calls, + pop_oldest_found: self.metrics.pop_oldest_found, + pop_oldest_empty_or_stale: self.metrics.pop_oldest_empty_or_stale, + peek_oldest_calls: self.metrics.peek_oldest_calls.get(), + peek_oldest_found: self.metrics.peek_oldest_found.get(), + age_rank_calls: self.metrics.age_rank_calls.get(), + age_rank_found: self.metrics.age_rank_found.get(), + age_rank_scan_steps: self.metrics.age_rank_scan_steps.get(), + cache_len: self.inner.cache.len(), + insertion_order_len: self.inner.insertion_order.len(), + capacity: self.inner.capacity, + } } /// Manually removes a key from the cache HashMap only (for testing stale entries). 
@@ -334,10 +497,10 @@ where #[cfg(test)] pub fn remove_from_cache_only(&mut self, key: &K) -> Option> { // Find the Arc that matches this key - let arc_key = self.cache.keys().find(|k| k.as_ref() == key).cloned(); + let arc_key = self.inner.cache.keys().find(|k| k.as_ref() == key).cloned(); if let Some(arc_key) = arc_key { - self.cache.remove(&arc_key) + self.inner.cache.remove(&arc_key) } else { None } @@ -346,37 +509,46 @@ where /// Returns the current cache HashMap capacity (for testing memory usage). #[cfg(test)] pub fn cache_capacity(&self) -> usize { - self.cache.capacity() + self.inner.cache.capacity() } /// Returns the current insertion order VecDeque capacity (for testing memory usage). #[cfg(test)] pub fn insertion_order_capacity(&self) -> usize { - self.insertion_order.capacity() + self.inner.insertion_order.capacity() } /// Evicts the oldest valid entry from the cache. /// Skips over any stale entries (keys that were lazily deleted). fn evict_oldest(&mut self) { + self.metrics.evict_calls += 1; // Keep popping from the front until we find a valid key or the queue is empty - while let Some(oldest_key) = self.insertion_order.pop_front() { - if self.cache.contains_key(&oldest_key) { + while let Some(oldest_key) = self.inner.insertion_order.pop_front() { + self.metrics.evict_scan_steps += 1; + + if self.inner.cache.contains_key(&oldest_key) { // Found a valid key, remove it and stop - self.cache.remove(&oldest_key); + self.inner.cache.remove(&oldest_key); + self.metrics.evicted_entries += 1; break; } // Skip stale entries (keys that were already removed from the cache) + self.metrics.stale_skips += 1; } } } -impl CoreCache for FIFOCache +impl CoreCache for InstrumentedFifoCache where K: Eq + Hash, + V: Debug, { fn insert(&mut self, key: K, value: V) -> Option { + // Update cache metrics + self.metrics.insert_calls += 1; + // If capacity is 0, cannot store anything - if self.capacity == 0 { + if self.inner.capacity == 0 { return None; } @@ -384,93 +556,85 @@ 
where let value_arc = Arc::new(value); // If the key already exists, update the value - if let std::collections::hash_map::Entry::Occupied(mut e) = - self.cache.entry(key_arc.clone()) - { - return Some(e.insert(value_arc)) - .map(|old_value_arc| { - // Try to unwrap the Arc to get the original value - match Arc::try_unwrap(old_value_arc) { - Ok(old_value) => old_value, - Err(_) => { - // If unwrap fails, there are external references to this Arc - // This violates our cache's ownership model - panic!("Failed to unwrap Arc in insert - there are external references to the value"); - } - } - }); + if let hash_map::Entry::Occupied(mut e) = self.inner.cache.entry(key_arc.clone()) { + self.metrics.insert_updates += 1; + + return Some(e.insert(value_arc)).map(|old_value_arc| { + Arc::try_unwrap(old_value_arc).expect("external Arc references detected") + }); } + self.metrics.insert_new += 1; + // If the cache is at capacity, remove the oldest valid item (FIFO) - if self.cache.len() >= self.capacity { + if self.inner.cache.len() >= self.inner.capacity { self.evict_oldest(); } // Add the new key to the insertion order and cache // Only the Arc pointers are cloned (8 bytes each), not the actual data - self.insertion_order.push_back(key_arc.clone()); - self.cache.insert(key_arc, value_arc); + self.inner.insertion_order.push_back(key_arc.clone()); + self.inner.cache.insert(key_arc, value_arc); None } fn get(&mut self, key: &K) -> Option<&V> { + self.metrics.get_calls += 1; + // In FIFO, getting an item doesn't change its position // Use HashMap's O(1) lookup by leveraging Borrow trait // HashMap, V> supports lookups with &K when K implements Borrow - self.cache.get(key).map(|v| v.as_ref()) + match self.inner.cache.get(key) { + Some(v) => { + self.metrics.get_hits += 1; + Some(v.as_ref()) + }, + None => { + self.metrics.get_misses += 1; + None + }, + } } fn contains(&self, key: &K) -> bool { // Use HashMap's O(1) lookup by leveraging Borrow trait // HashMap, V> supports lookups 
with &K when K implements Borrow - self.cache.contains_key(key) + self.inner.cache.contains_key(key) } fn len(&self) -> usize { - self.cache.len() + self.inner.cache.len() } fn capacity(&self) -> usize { - self.capacity + self.inner.capacity } fn clear(&mut self) { - self.cache.clear(); - self.insertion_order.clear(); + self.inner.cache.clear(); + self.inner.insertion_order.clear(); } } -impl FIFOCacheTrait for FIFOCache +impl FIFOCacheTrait for InstrumentedFifoCache where - K: Eq + Hash, + K: Eq + Hash + Debug, + V: Debug, { fn pop_oldest(&mut self) -> Option<(K, V)> { + self.metrics.pop_oldest_calls += 1; + // Use the existing evict_oldest logic but return the key-value pair - while let Some(oldest_key_arc) = self.insertion_order.pop_front() { - if let Some(value_arc) = self.cache.remove(&oldest_key_arc) { + while let Some(oldest_key_arc) = self.inner.insertion_order.pop_front() { + if let Some(value_arc) = self.inner.cache.remove(&oldest_key_arc) { + self.metrics.pop_oldest_found += 1; + // Try to unwrap both Arcs to get the original key and value // This should succeed since we just removed them from the cache - let key = match Arc::try_unwrap(oldest_key_arc) { - Ok(key) => key, - Err(_) => { - // If unwrap fails, it means there are external references to this Arc - // This violates our cache's ownership model and shouldn't happen in normal usage - panic!( - "Failed to unwrap Arc in pop_oldest - there are external references to the key" - ); - }, - }; - - let value = match Arc::try_unwrap(value_arc) { - Ok(value) => value, - Err(_) => { - // If unwrap fails, it means there are external references to this Arc - // This violates our cache's ownership model and shouldn't happen in normal usage - panic!( - "Failed to unwrap Arc in pop_oldest - there are external references to the value" - ); - }, - }; + let key = + Arc::try_unwrap(oldest_key_arc).expect("external Arc references detected"); + let value = + Arc::try_unwrap(value_arc).expect("external Arc references 
detected"); return Some((key, value)); } @@ -480,9 +644,11 @@ where } fn peek_oldest(&self) -> Option<(&K, &V)> { + self.metrics.peek_oldest_calls.incr(); + // Find the first valid entry in the insertion order - for key_arc in &self.insertion_order { - if let Some(value_arc) = self.cache.get(key_arc) { + for key_arc in &self.inner.insertion_order { + if let Some(value_arc) = self.inner.cache.get(key_arc) { return Some((key_arc.as_ref(), value_arc.as_ref())); } } @@ -504,8 +670,8 @@ where fn age_rank(&self, key: &K) -> Option { // Find position in insertion order, accounting for stale entries let mut rank = 0; - for insertion_key_arc in &self.insertion_order { - if self.cache.contains_key(insertion_key_arc) { + for insertion_key_arc in &self.inner.insertion_order { + if self.inner.cache.contains_key(insertion_key_arc) { if insertion_key_arc.as_ref() == key { return Some(rank); } @@ -529,7 +695,7 @@ mod tests { #[test] fn test_basic_fifo_insertion_and_retrieval() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Test basic insertion and retrieval assert_eq!(cache.insert("key1", "value1"), None); @@ -544,7 +710,7 @@ mod tests { #[test] fn test_fifo_eviction_order() { - let mut cache = FIFOCache::new(2); + let mut cache = InstrumentedFifoCache::new(2); // Fill cache to capacity cache.insert("first", "value1"); @@ -561,7 +727,7 @@ mod tests { #[test] fn test_capacity_enforcement() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Fill beyond capacity for i in 1..=5 { @@ -579,7 +745,7 @@ mod tests { #[test] fn test_update_existing_key() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); cache.insert("key1", "original"); cache.insert("key2", "value2"); @@ -593,7 +759,7 @@ mod tests { #[test] fn test_insertion_order_preservation() { - let mut cache = FIFOCache::new(4); + let mut cache = InstrumentedFifoCache::new(4); // Insert items in a specific order 
cache.insert("first", 1); @@ -635,7 +801,7 @@ mod tests { #[test] fn test_key_operations_consistency() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Test consistency between contents, get, and len assert_eq!(cache.len(), 0); @@ -716,7 +882,7 @@ mod tests { #[test] fn test_empty_cache_operations() { - let mut cache: FIFOCache = FIFOCache::new(5); + let mut cache: InstrumentedFifoCache = InstrumentedFifoCache::new(5); assert_eq!(cache.get(&"nonexistent".to_string()), None); assert!(!cache.contains(&"nonexistent".to_string())); @@ -730,7 +896,7 @@ mod tests { #[test] fn test_single_item_cache() { - let mut cache = FIFOCache::new(1); + let mut cache = InstrumentedFifoCache::new(1); cache.insert("only", "value1"); assert_eq!(cache.len(), 1); @@ -745,7 +911,7 @@ mod tests { #[test] fn test_zero_capacity_cache() { - let mut cache = FIFOCache::new(0); + let mut cache = InstrumentedFifoCache::new(0); // Should not be able to store anything cache.insert("key", "value"); @@ -756,7 +922,7 @@ mod tests { #[test] fn test_clear_operation() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); cache.insert("key1", "value1"); cache.insert("key2", "value2"); @@ -771,7 +937,7 @@ mod tests { #[test] fn test_duplicate_key_handling() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Insert the initial key assert_eq!(cache.insert("key1", "value1"), None); @@ -825,7 +991,7 @@ mod tests { #[test] fn test_boundary_conditions() { - let mut cache = FIFOCache::new(2); + let mut cache = InstrumentedFifoCache::new(2); // Test exactly at capacity cache.insert("key1", "value1"); @@ -880,7 +1046,7 @@ mod tests { #[test] fn test_empty_to_full_transition() { - let mut cache = FIFOCache::new(4); + let mut cache = InstrumentedFifoCache::new(4); // Start empty assert_eq!(cache.len(), 0); @@ -951,7 +1117,7 @@ mod tests { fn test_full_to_empty_transition() { // Helper function to create 
cache with same initial state (avoids cloning) let create_test_cache = || { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); cache.insert("item1", 1); cache.insert("item2", 2); cache.insert("item3", 3); @@ -1015,7 +1181,7 @@ mod tests { } // Test partial emptying and refilling - let mut partial_cache = FIFOCache::new(4); + let mut partial_cache = InstrumentedFifoCache::new(4); partial_cache.insert("a", 1); partial_cache.insert("b", 2); partial_cache.insert("c", 3); @@ -1046,7 +1212,7 @@ mod tests { #[test] fn test_pop_oldest() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); cache.insert("first", "value1"); cache.insert("second", "value2"); @@ -1064,7 +1230,7 @@ mod tests { #[test] fn test_peek_oldest() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); cache.insert("first", "value1"); cache.insert("second", "value2"); @@ -1080,7 +1246,7 @@ mod tests { #[test] fn test_age_rank() { - let mut cache = FIFOCache::new(4); + let mut cache = InstrumentedFifoCache::new(4); cache.insert("first", "value1"); // rank 0 (oldest) cache.insert("second", "value2"); // rank 1 @@ -1096,7 +1262,7 @@ mod tests { #[test] fn test_pop_oldest_batch() { - let mut cache = FIFOCache::new(5); + let mut cache = InstrumentedFifoCache::new(5); for i in 1..=5 { cache.insert(format!("key{}", i), format!("value{}", i)); @@ -1117,7 +1283,7 @@ mod tests { #[test] fn test_pop_oldest_batch_more_than_available() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); cache.insert("key1", "value1"); cache.insert("key2", "value2"); @@ -1130,7 +1296,7 @@ mod tests { #[test] fn test_pop_oldest_empty_cache() { - let mut cache: FIFOCache = FIFOCache::new(5); + let mut cache: InstrumentedFifoCache = InstrumentedFifoCache::new(5); // Pop from the empty cache should return None assert_eq!(cache.pop_oldest(), None); @@ -1156,7 +1322,7 @@ mod tests { #[test] fn 
test_peek_oldest_empty_cache() { - let cache: FIFOCache = FIFOCache::new(5); + let cache: InstrumentedFifoCache = InstrumentedFifoCache::new(5); // Peek at the empty cache should return None assert_eq!(cache.peek_oldest(), None); @@ -1168,7 +1334,7 @@ mod tests { assert_eq!(cache.len(), 0); // Test peek after clear - let mut test_cache = FIFOCache::new(3); + let mut test_cache = InstrumentedFifoCache::new(3); test_cache.insert("key1".to_string(), "value1".to_string()); test_cache.insert("key2".to_string(), "value2".to_string()); @@ -1184,7 +1350,7 @@ mod tests { #[test] fn test_age_rank_after_eviction() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Fill cache cache.insert("first", 1); @@ -1242,7 +1408,7 @@ mod tests { #[test] fn test_batch_operations_edge_cases() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Test batch with count = 0 cache.insert("key1", "value1"); @@ -1255,7 +1421,8 @@ mod tests { assert!(cache.contains(&"key2")); // Test batch on empty cache - let mut empty_cache: FIFOCache = FIFOCache::new(5); + let mut empty_cache: InstrumentedFifoCache = + InstrumentedFifoCache::new(5); let empty_batch = empty_cache.pop_oldest_batch(3); assert_eq!(empty_batch.len(), 0); assert_eq!(empty_cache.len(), 0); @@ -1311,7 +1478,7 @@ mod tests { #[test] fn test_stale_entry_skipping_during_eviction() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Fill cache to capacity cache.insert("key1", "value1"); @@ -1357,7 +1524,7 @@ mod tests { #[test] fn test_insertion_order_consistency_with_stale_entries() { - let mut cache = FIFOCache::new(4); + let mut cache = InstrumentedFifoCache::new(4); // Fill cache cache.insert("a", 1); @@ -1408,7 +1575,7 @@ mod tests { #[test] fn test_lazy_deletion_behavior() { - let mut cache = FIFOCache::new(3); + let mut cache = InstrumentedFifoCache::new(3); // Test 1: Stale entries accumulate until cleanup operations 
cache.insert("temp1", "value1"); @@ -1483,7 +1650,7 @@ mod tests { #[test] fn test_stale_entry_cleanup_during_operations() { - let mut cache = FIFOCache::new(4); + let mut cache = InstrumentedFifoCache::new(4); // Setup: Create cache with mix of valid and future stale entries cache.insert("will_be_stale1", "stale1"); diff --git a/tests/storage/fifo_concurrency.rs b/tests/storage/fifo_concurrency.rs index 740ce3e4..7a36bbbe 100644 --- a/tests/storage/fifo_concurrency.rs +++ b/tests/storage/fifo_concurrency.rs @@ -7,17 +7,18 @@ use std::thread; use std::time::{Duration, Instant}; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; -use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; +use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; mod thread_safe_wrapper { use super::*; // Helper type for thread-safe testing - type ThreadSafeFIFOCache = Arc>>; + type ThreadSafeInstrumentedFifoCache = Arc>>; #[test] fn test_basic_thread_safe_operations() { - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(100))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(100))); let num_threads = 8; let operations_per_thread = 250; let success_count = Arc::new(AtomicUsize::new(0)); @@ -113,7 +114,8 @@ mod thread_safe_wrapper { #[test] fn test_read_heavy_workload() { - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(200))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(200))); let num_reader_threads = 12; let num_writer_threads = 2; let reads_per_thread = 500; @@ -226,7 +228,8 @@ mod thread_safe_wrapper { #[test] fn test_write_heavy_workload() { - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(150))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(150))); let num_threads = 10; let writes_per_thread = 200; let 
total_writes = Arc::new(AtomicUsize::new(0)); @@ -309,7 +312,8 @@ mod thread_safe_wrapper { #[test] fn test_mixed_operations_concurrency() { - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(100))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(100))); let num_threads = 16; let operations_per_thread = 150; let operation_counts = Arc::new(AtomicUsize::new(0)); @@ -414,7 +418,8 @@ mod thread_safe_wrapper { #[test] fn test_deadlock_prevention() { - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(50))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(50))); let num_threads = 20; let timeout_duration = Duration::from_secs(10); let start_time = Instant::now(); @@ -484,7 +489,8 @@ mod thread_safe_wrapper { #[test] fn test_fairness_across_threads() { - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(80))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(80))); let num_threads = 8; let target_operations = 200; let test_duration = Duration::from_secs(5); @@ -570,12 +576,13 @@ mod stress_testing { use super::*; // Helper type for thread-safe testing - type ThreadSafeFIFOCache = Arc>>; + type ThreadSafeInstrumentedFifoCache = Arc>>; #[test] fn test_high_contention_scenario() { // Many threads accessing same small set of keys - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(50))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(50))); let num_threads = 20; let operations_per_thread = 500; let hot_keys = 10; // Small set of hotly contested keys @@ -656,7 +663,8 @@ mod stress_testing { #[test] fn test_cache_thrashing_scenario() { // Rapid insertions causing constant evictions (cache thrashing) - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(100))); + let cache: 
ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(100))); let num_threads = 15; let operations_per_thread = 300; let key_space_multiplier = 10; // 10x more keys than capacity @@ -748,7 +756,8 @@ mod stress_testing { #[test] fn test_long_running_stability() { // Verify stability over extended periods with continuous load - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(200))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(200))); let num_threads = 8; let test_duration = Duration::from_secs(15); // Extended test let stability_check_interval = Duration::from_secs(3); @@ -879,8 +888,8 @@ mod stress_testing { fn test_memory_pressure_scenario() { // Test behavior with large cache and memory-intensive operations let large_capacity = 5000; - let cache: ThreadSafeFIFOCache = - Arc::new(Mutex::new(FIFOCache::new(large_capacity))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(large_capacity))); let num_threads = 12; let operations_per_thread = 500; @@ -962,7 +971,8 @@ mod stress_testing { #[test] fn test_rapid_thread_creation_destruction() { // Test with threads being created and destroyed rapidly - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(150))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(150))); let num_thread_waves = 20; let threads_per_wave = 10; let operations_per_thread = 50; @@ -1043,7 +1053,8 @@ mod stress_testing { #[test] fn test_burst_load_handling() { // Test handling of sudden burst loads - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(300))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(300))); let burst_threads = 25; let operations_per_burst_thread = 100; let background_threads = 5; @@ -1169,7 +1180,8 @@ mod stress_testing { #[test] fn 
test_gradual_load_increase() { // Test behavior as load gradually increases - let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(200))); + let cache: ThreadSafeInstrumentedFifoCache = + Arc::new(Mutex::new(InstrumentedFifoCache::new(200))); let max_threads = 20; let operations_per_thread = 100; let ramp_up_steps = 10; From e15001f35852d8fa51b871fce30251dadb9dcac4 Mon Sep 17 00:00:00 2001 From: Thomas Korrison Date: Fri, 9 Jan 2026 16:29:08 +0000 Subject: [PATCH 2/4] Rename InstrumentedFifoCache to FIFOCache because I prefer the name --- benches/cache_benchmarks.rs | 12 ++-- benches/fifo_cache_benchmarking.rs | 40 +++++------ benches/fifo_complexity_benchmarks.rs | 38 +++++----- .../disk/async_disk/cache/cache_manager.rs | 44 ++++++------ src/storage/disk/async_disk/cache/fifo.rs | 69 +++++++++---------- tests/storage/fifo_concurrency.rs | 46 +++++-------- 6 files changed, 118 insertions(+), 131 deletions(-) diff --git a/benches/cache_benchmarks.rs b/benches/cache_benchmarks.rs index c5634013..9f8cce16 100644 --- a/benches/cache_benchmarks.rs +++ b/benches/cache_benchmarks.rs @@ -1,6 +1,6 @@ use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use ferrite::storage::disk::async_disk::cache::cache_traits::CoreCache; -use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; +use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; use std::hint::black_box; fn benchmark_fifo_cache_operations(c: &mut Criterion) { @@ -17,7 +17,7 @@ fn benchmark_fifo_cache_operations(c: &mut Criterion) { &size, |b, &size| { // Setup - let mut cache = InstrumentedFifoCache::new(size); + let mut cache = FIFOCache::new(size); // Fill cache to 80% capacity let fill_count = (size as f64 * 0.8) as usize; @@ -58,7 +58,7 @@ fn benchmark_fifo_eviction_complexity(c: &mut Criterion) { b.iter_batched( // Setup for each iteration || { - let mut cache = InstrumentedFifoCache::new(size); + let mut cache = FIFOCache::new(size); // Fill cache
to capacity for i in 0..size { @@ -103,7 +103,7 @@ fn benchmark_cache_comparison(c: &mut Criterion) { // Compare different cache operations group.bench_function("fifo_insert", |b| { b.iter_batched( - || InstrumentedFifoCache::new(size), + || FIFOCache::new(size), |mut cache| { for i in 0..100 { cache.insert( @@ -118,7 +118,7 @@ fn benchmark_cache_comparison(c: &mut Criterion) { }); group.bench_function("fifo_get_hit", |b| { - let mut cache = InstrumentedFifoCache::new(size); + let mut cache = FIFOCache::new(size); for i in 0..100 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -131,7 +131,7 @@ fn benchmark_cache_comparison(c: &mut Criterion) { }); group.bench_function("fifo_get_miss", |b| { - let mut cache = InstrumentedFifoCache::new(size); + let mut cache = FIFOCache::new(size); for i in 0..100 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } diff --git a/benches/fifo_cache_benchmarking.rs b/benches/fifo_cache_benchmarking.rs index 91d1436c..f99bcc7a 100644 --- a/benches/fifo_cache_benchmarking.rs +++ b/benches/fifo_cache_benchmarking.rs @@ -12,7 +12,7 @@ use criterion::{ criterion_group, criterion_main, }; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; -use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; +use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; // ================================================================================= // TIME COMPLEXITY BENCHMARKS @@ -54,7 +54,7 @@ fn benchmark_insert_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); // Pre-fill to capacity-1 to avoid eviction effects for i in 0..(cache_size - 1) { cache.insert(i, i); @@ -120,7 +120,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { // Calibrate repeat count using a temporary cache so each measured sample aggregates // enough work 
to reach ~target_sample duration. - let mut tmp_cache = InstrumentedFifoCache::new(cache_size); + let mut tmp_cache = FIFOCache::new(cache_size); prefill_cache( &mut tmp_cache, &keys, @@ -143,7 +143,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { b.iter_batched( || { // Fresh cache per sample, setup not timed - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); prefill_cache( &mut cache, &keys, @@ -184,7 +184,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { let values = pregen_values(total_ops.max(working_set)); let ops = build_random_ops(total_ops, working_set, rnd_insert_every); - let mut tmp_cache = InstrumentedFifoCache::new(cache_size); + let mut tmp_cache = FIFOCache::new(cache_size); prefill_cache( &mut tmp_cache, &keys, @@ -205,7 +205,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { |b, &(cache_size, _)| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); prefill_cache( &mut cache, &keys, @@ -242,7 +242,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { // Heavy eviction - small cache, large key space group.bench_function("heavy_eviction", |b| { b.iter_batched( - || InstrumentedFifoCache::new(500), + || FIFOCache::new(500), |mut cache| { // 10x more keys than capacity - forces constant eviction for i in 0..15000 { @@ -264,7 +264,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { group.bench_function("light_eviction", |b| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(2000); + let mut cache = FIFOCache::new(2000); // Pre-populate with working set for i in 0..1500 { cache.insert(format!("stable_{}", i), format!("data_{}", i)); @@ -312,7 +312,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { // peek_oldest performance group.bench_function("peek_oldest", |b| { - let mut cache = InstrumentedFifoCache::new(1000); + let mut cache = FIFOCache::new(1000); for i in 0..1000 { 
cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -324,7 +324,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { group.bench_function("pop_oldest", |b| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(1000); + let mut cache = FIFOCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -343,7 +343,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { |b, &batch_size| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(1000); + let mut cache = FIFOCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -370,7 +370,7 @@ fn benchmark_fifo_micro_ops(c: &mut Criterion) { // contains for &cap in &[128usize, 1024, 4096] { group.bench_with_input(BenchmarkId::new("contains", cap), &cap, |b, &cap| { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -386,7 +386,7 @@ fn benchmark_fifo_micro_ops(c: &mut Criterion) { group.bench_with_input(BenchmarkId::new("clear", cap), &cap, |b, &cap| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -404,7 +404,7 @@ fn benchmark_fifo_micro_ops(c: &mut Criterion) { // age_rank for &cap in &[256usize, 1024, 4096] { group.bench_with_input(BenchmarkId::new("age_rank", cap), &cap, |b, &cap| { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -440,7 +440,7 @@ fn benchmark_stale_impact(c: &mut Criterion) { group.bench_with_input(id, &(cap, factor), |b, &(cap, factor)| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); // Fill to capacity for i in 0..cap { cache.insert(format!("k{i}"), 
format!("v{i}")); @@ -461,7 +461,7 @@ fn benchmark_stale_impact(c: &mut Criterion) { group.bench_with_input(id, &(cap, factor), |b, &(cap, factor)| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -483,7 +483,7 @@ fn benchmark_stale_impact(c: &mut Criterion) { group.bench_with_input(id, &(cap, factor), |b, &(cap, factor)| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); for i in 0..cap { cache.insert(format!("k{i}"), format!("v{i}")); } @@ -519,7 +519,7 @@ fn benchmark_space_usage(c: &mut Criterion) { group.bench_with_input(BenchmarkId::from_parameter(cap), &cap, |b, &cap| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cap); + let mut cache = FIFOCache::new(cap); // Fill beyond capacity to create stale entries as well for i in 0..(cap * 3) { cache.insert(format!("s{i}"), format!("vs{i}")); @@ -638,7 +638,7 @@ fn build_random_ops(total_ops: usize, working_set: usize, insert_every: usize) - } fn prefill_cache( - cache: &mut InstrumentedFifoCache, + cache: &mut FIFOCache, keys: &[K], values: &[V], prefill: usize, @@ -650,7 +650,7 @@ fn prefill_cache( } fn run_ops_once( - cache: &mut InstrumentedFifoCache, + cache: &mut FIFOCache, keys: &[K], ops: &[Op], values: &[V], diff --git a/benches/fifo_complexity_benchmarks.rs b/benches/fifo_complexity_benchmarks.rs index 5003c578..c2cc6072 100644 --- a/benches/fifo_complexity_benchmarks.rs +++ b/benches/fifo_complexity_benchmarks.rs @@ -3,7 +3,7 @@ use criterion::{ criterion_group, criterion_main, }; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; -use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; +use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; use std::hint::black_box; fn generate_cache_sizes() -> Vec { @@ -46,7 +46,7 @@ fn 
benchmark_insert_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); // Pre-fill to capacity-1 to avoid eviction effects for i in 0..(cache_size - 1) { cache.insert(i, i); @@ -82,7 +82,7 @@ fn benchmark_get_time_complexity(c: &mut Criterion) { &cache_size, |b, &cache_size| { // Setup: Create cache filled with data - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -114,7 +114,7 @@ fn benchmark_contains_time_complexity(c: &mut Criterion) { BenchmarkId::new("contains_operations", cache_size), &cache_size, |b, &cache_size| { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -145,7 +145,7 @@ fn benchmark_eviction_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); // Fill to capacity for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); @@ -201,7 +201,7 @@ fn benchmark_age_rank_time_complexity(c: &mut Criterion) { BenchmarkId::new("age_rank_operations", cache_size), &cache_size, |b, &cache_size| { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); for i in 0..cache_size { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -231,7 +231,7 @@ fn benchmark_clear_time_complexity(c: &mut Criterion) { |b, &cache_size| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); // Fill cache with data for i in 0..cache_size { cache.insert( @@ -271,7 +271,7 @@ fn 
benchmark_memory_usage_patterns(c: &mut Criterion) { b.iter_batched( || { // Create empty cache - InstrumentedFifoCache::::new(cache_size) + FIFOCache::::new(cache_size) }, |mut cache| { // Fill cache to capacity and measure allocation behavior @@ -305,7 +305,7 @@ fn benchmark_memory_pressure_scenarios(c: &mut Criterion) { &value_size, |b, &value_size| { b.iter_batched( - || InstrumentedFifoCache::::new(1000), + || FIFOCache::::new(1000), |mut cache| { let large_value = "x".repeat(value_size); // Fill cache with large values to create memory pressure @@ -337,7 +337,7 @@ fn benchmark_realistic_workloads(c: &mut Criterion) { // Small cache performance group.bench_function("small_cache_mixed_workload", |b| { b.iter_batched( - || InstrumentedFifoCache::new(100), + || FIFOCache::new(100), |mut cache| { let mut hits = 0; // Mixed workload: 33% inserts, 67% gets @@ -360,7 +360,7 @@ fn benchmark_realistic_workloads(c: &mut Criterion) { // Medium cache performance group.bench_function("medium_cache_mixed_workload", |b| { b.iter_batched( - || InstrumentedFifoCache::new(1000), + || FIFOCache::new(1000), |mut cache| { let mut hits = 0; // Complex workload: 40% inserts, 60% gets @@ -383,7 +383,7 @@ fn benchmark_realistic_workloads(c: &mut Criterion) { // Large cache performance group.bench_function("large_cache_batch_workload", |b| { b.iter_batched( - || InstrumentedFifoCache::new(10000), + || FIFOCache::new(10000), |mut cache| { let mut hits = 0; // Batch workload @@ -462,7 +462,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { b.iter_batched( || { // Fresh cache per measurement - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); // Prefill to ~80% to simulate realistic hit/miss behavior let prefill = (cache_size as f64 * 0.8) as usize; for (i, _item) in keys.iter().enumerate().take(prefill.min(working_set)) @@ -519,7 +519,7 @@ fn benchmark_access_patterns(c: &mut Criterion) { b.iter_batched( || { // Fresh cache per 
measurement - let mut cache = InstrumentedFifoCache::new(cache_size); + let mut cache = FIFOCache::new(cache_size); // Prefill to ~60% for random to bias toward more misses let prefill = (cache_size as f64 * 0.6) as usize; for (i, _item) in keys.iter().enumerate().take(prefill.min(working_set)) @@ -561,7 +561,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { // Heavy eviction - small cache, large key space group.bench_function("heavy_eviction", |b| { b.iter_batched( - || InstrumentedFifoCache::new(500), + || FIFOCache::new(500), |mut cache| { // 10x more keys than capacity - forces constant eviction for i in 0..15000 { @@ -585,7 +585,7 @@ fn benchmark_eviction_scenarios(c: &mut Criterion) { group.bench_function("light_eviction", |b| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(2000); + let mut cache = FIFOCache::new(2000); // Pre-populate with working set for i in 0..1500 { cache.insert(format!("stable_{}", i), format!("data_{}", i)); @@ -633,7 +633,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { // peek_oldest performance group.bench_function("peek_oldest", |b| { - let mut cache = InstrumentedFifoCache::new(1000); + let mut cache = FIFOCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -645,7 +645,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { group.bench_function("pop_oldest", |b| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(1000); + let mut cache = FIFOCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } @@ -664,7 +664,7 @@ fn benchmark_fifo_operations(c: &mut Criterion) { |b, &batch_size| { b.iter_batched( || { - let mut cache = InstrumentedFifoCache::new(1000); + let mut cache = FIFOCache::new(1000); for i in 0..1000 { cache.insert(format!("key_{}", i), format!("value_{}", i)); } diff --git a/src/storage/disk/async_disk/cache/cache_manager.rs b/src/storage/disk/async_disk/cache/cache_manager.rs 
index c32d80a7..46699b7b 100644 --- a/src/storage/disk/async_disk/cache/cache_manager.rs +++ b/src/storage/disk/async_disk/cache/cache_manager.rs @@ -121,7 +121,7 @@ //! | `CacheManager` | Central cache coordinator with three tiers | //! | `LRUKCache` | Hot cache with LRU-K (K=2) eviction | //! | `LFUCache` | Warm cache with frequency-based eviction | -//! | `InstrumentedFifoCache` | Cold cache with FIFO eviction | +//! | `FIFOCache` | Cold cache with FIFO eviction | //! | `AdmissionController` | Regulates cache entry based on memory pressure | //! | `PrefetchEngine` | Sequential and pattern-based prefetch prediction | //! | `DeduplicationEngine` | Identifies duplicate page content | @@ -237,7 +237,7 @@ use std::time::Instant; use tokio::sync::RwLock; -use super::fifo::InstrumentedFifoCache; +use super::fifo::FIFOCache; use super::lfu::LFUCache; use super::lru_k::LRUKCache; use crate::common::config::{DB_PAGE_SIZE, PageId}; @@ -818,7 +818,7 @@ pub struct CacheManager { /// L3 Cold Cache: FIFO based cache for newly inserted pages. /// Low overhead, ideal for one-time sequential scans. - cold_cache: Arc>>, + cold_cache: Arc>>, /// Configured capacity for hot cache (number of entries). 
hot_cache_size: usize, @@ -911,7 +911,7 @@ impl CacheManager { // Use LRU-K cache for hot cache (L1) let hot_cache = Arc::new(RwLock::new(LRUKCache::with_k(hot_cache_size, 2))); let warm_cache = Arc::new(RwLock::new(LFUCache::new(warm_cache_size))); - let cold_cache = Arc::new(RwLock::new(InstrumentedFifoCache::new(cold_cache_size))); + let cold_cache = Arc::new(RwLock::new(FIFOCache::new(cold_cache_size))); let prefetch_engine = Arc::new(RwLock::new(PrefetchEngine { access_patterns: HashMap::new(), @@ -1069,10 +1069,11 @@ impl CacheManager { // Check L3 cold cache (FIFO) if let Ok(mut cold_cache) = self.cold_cache.try_write() - && let Some(page_data) = as CoreCache< - PageId, - PageData, - >>::get(&mut cold_cache, &page_id) + && let Some(page_data) = + as CoreCache>::get( + &mut cold_cache, + &page_id, + ) { // Promote to warm cache on hit - clone the PageData (cheap with Arc) self.cache_hits.fetch_add(1, Ordering::Relaxed); @@ -1278,7 +1279,7 @@ impl CacheManager { temperature: temperature.clone(), }; if let Ok(mut cache) = self.cold_cache.try_write() { - as CoreCache>::insert( + as CoreCache>::insert( &mut cache, page_id, page_data, ); } @@ -1514,7 +1515,7 @@ impl CacheManager { }; let cold_used = if let Ok(cache) = self.cold_cache.try_read() { - as CoreCache>::len(&cache) + as CoreCache>::len(&cache) } else { 0 }; @@ -1584,16 +1585,14 @@ impl CacheManager { // High memory pressure - reduce cold cache size if let Ok(mut cold_cache) = self.cold_cache.try_write() { let target_size = self.cold_cache_size / 2; - let current_size = as CoreCache< - PageId, - PageData, - >>::len(&cold_cache); + let current_size = + as CoreCache>::len(&cold_cache); if current_size > target_size { // For FIFO cache, we can't easily evict specific items // Instead, we'll clear part of the cache if current_size > target_size * 2 { let evicted_count = current_size as u64; - as CoreCache>::clear( + as CoreCache>::clear( &mut cold_cache, ); self.demotion_count @@ -1670,7 +1669,7 @@ impl 
CacheManager { }; let cold_used = if let Ok(cache) = self.cold_cache.try_read() { - as CoreCache>::len(&cache) + as CoreCache>::len(&cache) } else { 0 }; @@ -1967,7 +1966,7 @@ impl CacheManager { pub fn perform_specialized_maintenance(&self) { // Use FIFO-specific operations for cold cache if let Ok(cold_cache) = self.cold_cache.try_read() - && let Some((oldest_key, _)) = as FIFOCacheTrait< + && let Some((oldest_key, _)) = as FIFOCacheTrait< PageId, PageData, >>::peek_oldest(&cold_cache) @@ -2096,14 +2095,15 @@ impl CacheManager { if let Ok(cold_cache) = self.cold_cache.try_read() { // Check if page is in cold cache and get FIFO specific details - if as CoreCache>::contains( + if as CoreCache>::contains( &cold_cache, &page_id, ) { - let age_rank = as FIFOCacheTrait< - PageId, - PageData, - >>::age_rank(&cold_cache, &page_id); + let age_rank = + as FIFOCacheTrait>::age_rank( + &cold_cache, + &page_id, + ); return Some(PageAccessDetails { cache_level: "Cold".to_string(), diff --git a/src/storage/disk/async_disk/cache/fifo.rs b/src/storage/disk/async_disk/cache/fifo.rs index 51169748..c759ef31 100644 --- a/src/storage/disk/async_disk/cache/fifo.rs +++ b/src/storage/disk/async_disk/cache/fifo.rs @@ -284,7 +284,7 @@ use crate::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCache /// Evicts the oldest (first inserted) item when capacity is reached. /// See module-level documentation for details. 
#[derive(Debug)] -pub struct InstrumentedFifoCache +pub struct FIFOCache where K: Eq + Hash, { @@ -426,7 +426,7 @@ where } } -impl InstrumentedFifoCache +impl FIFOCache where K: Eq + Hash, V: Debug, @@ -538,7 +538,7 @@ where } } -impl CoreCache for InstrumentedFifoCache +impl CoreCache for FIFOCache where K: Eq + Hash, V: Debug, @@ -616,7 +616,7 @@ where } } -impl FIFOCacheTrait for InstrumentedFifoCache +impl FIFOCacheTrait for FIFOCache where K: Eq + Hash + Debug, V: Debug, @@ -695,7 +695,7 @@ mod tests { #[test] fn test_basic_fifo_insertion_and_retrieval() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Test basic insertion and retrieval assert_eq!(cache.insert("key1", "value1"), None); @@ -710,7 +710,7 @@ mod tests { #[test] fn test_fifo_eviction_order() { - let mut cache = InstrumentedFifoCache::new(2); + let mut cache = FIFOCache::new(2); // Fill cache to capacity cache.insert("first", "value1"); @@ -727,7 +727,7 @@ mod tests { #[test] fn test_capacity_enforcement() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Fill beyond capacity for i in 1..=5 { @@ -745,7 +745,7 @@ mod tests { #[test] fn test_update_existing_key() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); cache.insert("key1", "original"); cache.insert("key2", "value2"); @@ -759,7 +759,7 @@ mod tests { #[test] fn test_insertion_order_preservation() { - let mut cache = InstrumentedFifoCache::new(4); + let mut cache = FIFOCache::new(4); // Insert items in a specific order cache.insert("first", 1); @@ -801,7 +801,7 @@ mod tests { #[test] fn test_key_operations_consistency() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Test consistency between contents, get, and len assert_eq!(cache.len(), 0); @@ -882,7 +882,7 @@ mod tests { #[test] fn test_empty_cache_operations() { - let mut cache: InstrumentedFifoCache = 
InstrumentedFifoCache::new(5); + let mut cache: FIFOCache = FIFOCache::new(5); assert_eq!(cache.get(&"nonexistent".to_string()), None); assert!(!cache.contains(&"nonexistent".to_string())); @@ -896,7 +896,7 @@ mod tests { #[test] fn test_single_item_cache() { - let mut cache = InstrumentedFifoCache::new(1); + let mut cache = FIFOCache::new(1); cache.insert("only", "value1"); assert_eq!(cache.len(), 1); @@ -911,7 +911,7 @@ mod tests { #[test] fn test_zero_capacity_cache() { - let mut cache = InstrumentedFifoCache::new(0); + let mut cache = FIFOCache::new(0); // Should not be able to store anything cache.insert("key", "value"); @@ -922,7 +922,7 @@ mod tests { #[test] fn test_clear_operation() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); cache.insert("key1", "value1"); cache.insert("key2", "value2"); @@ -937,7 +937,7 @@ mod tests { #[test] fn test_duplicate_key_handling() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Insert the initial key assert_eq!(cache.insert("key1", "value1"), None); @@ -991,7 +991,7 @@ mod tests { #[test] fn test_boundary_conditions() { - let mut cache = InstrumentedFifoCache::new(2); + let mut cache = FIFOCache::new(2); // Test exactly at capacity cache.insert("key1", "value1"); @@ -1046,7 +1046,7 @@ mod tests { #[test] fn test_empty_to_full_transition() { - let mut cache = InstrumentedFifoCache::new(4); + let mut cache = FIFOCache::new(4); // Start empty assert_eq!(cache.len(), 0); @@ -1117,7 +1117,7 @@ mod tests { fn test_full_to_empty_transition() { // Helper function to create cache with same initial state (avoids cloning) let create_test_cache = || { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); cache.insert("item1", 1); cache.insert("item2", 2); cache.insert("item3", 3); @@ -1181,7 +1181,7 @@ mod tests { } // Test partial emptying and refilling - let mut partial_cache = InstrumentedFifoCache::new(4); + let mut 
partial_cache = FIFOCache::new(4); partial_cache.insert("a", 1); partial_cache.insert("b", 2); partial_cache.insert("c", 3); @@ -1212,7 +1212,7 @@ mod tests { #[test] fn test_pop_oldest() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); cache.insert("first", "value1"); cache.insert("second", "value2"); @@ -1230,7 +1230,7 @@ mod tests { #[test] fn test_peek_oldest() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); cache.insert("first", "value1"); cache.insert("second", "value2"); @@ -1246,7 +1246,7 @@ mod tests { #[test] fn test_age_rank() { - let mut cache = InstrumentedFifoCache::new(4); + let mut cache = FIFOCache::new(4); cache.insert("first", "value1"); // rank 0 (oldest) cache.insert("second", "value2"); // rank 1 @@ -1262,7 +1262,7 @@ mod tests { #[test] fn test_pop_oldest_batch() { - let mut cache = InstrumentedFifoCache::new(5); + let mut cache = FIFOCache::new(5); for i in 1..=5 { cache.insert(format!("key{}", i), format!("value{}", i)); @@ -1283,7 +1283,7 @@ mod tests { #[test] fn test_pop_oldest_batch_more_than_available() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); cache.insert("key1", "value1"); cache.insert("key2", "value2"); @@ -1296,7 +1296,7 @@ mod tests { #[test] fn test_pop_oldest_empty_cache() { - let mut cache: InstrumentedFifoCache = InstrumentedFifoCache::new(5); + let mut cache: FIFOCache = FIFOCache::new(5); // Pop from the empty cache should return None assert_eq!(cache.pop_oldest(), None); @@ -1322,7 +1322,7 @@ mod tests { #[test] fn test_peek_oldest_empty_cache() { - let cache: InstrumentedFifoCache = InstrumentedFifoCache::new(5); + let cache: FIFOCache = FIFOCache::new(5); // Peek at the empty cache should return None assert_eq!(cache.peek_oldest(), None); @@ -1334,7 +1334,7 @@ mod tests { assert_eq!(cache.len(), 0); // Test peek after clear - let mut test_cache = InstrumentedFifoCache::new(3); + let mut test_cache 
= FIFOCache::new(3); test_cache.insert("key1".to_string(), "value1".to_string()); test_cache.insert("key2".to_string(), "value2".to_string()); @@ -1350,7 +1350,7 @@ mod tests { #[test] fn test_age_rank_after_eviction() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Fill cache cache.insert("first", 1); @@ -1408,7 +1408,7 @@ mod tests { #[test] fn test_batch_operations_edge_cases() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Test batch with count = 0 cache.insert("key1", "value1"); @@ -1421,8 +1421,7 @@ mod tests { assert!(cache.contains(&"key2")); // Test batch on empty cache - let mut empty_cache: InstrumentedFifoCache = - InstrumentedFifoCache::new(5); + let mut empty_cache: FIFOCache = FIFOCache::new(5); let empty_batch = empty_cache.pop_oldest_batch(3); assert_eq!(empty_batch.len(), 0); assert_eq!(empty_cache.len(), 0); @@ -1478,7 +1477,7 @@ mod tests { #[test] fn test_stale_entry_skipping_during_eviction() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Fill cache to capacity cache.insert("key1", "value1"); @@ -1524,7 +1523,7 @@ mod tests { #[test] fn test_insertion_order_consistency_with_stale_entries() { - let mut cache = InstrumentedFifoCache::new(4); + let mut cache = FIFOCache::new(4); // Fill cache cache.insert("a", 1); @@ -1575,7 +1574,7 @@ mod tests { #[test] fn test_lazy_deletion_behavior() { - let mut cache = InstrumentedFifoCache::new(3); + let mut cache = FIFOCache::new(3); // Test 1: Stale entries accumulate until cleanup operations cache.insert("temp1", "value1"); @@ -1650,7 +1649,7 @@ mod tests { #[test] fn test_stale_entry_cleanup_during_operations() { - let mut cache = InstrumentedFifoCache::new(4); + let mut cache = FIFOCache::new(4); // Setup: Create cache with mix of valid and future stale entries cache.insert("will_be_stale1", "stale1"); diff --git a/tests/storage/fifo_concurrency.rs 
b/tests/storage/fifo_concurrency.rs index 7a36bbbe..740ce3e4 100644 --- a/tests/storage/fifo_concurrency.rs +++ b/tests/storage/fifo_concurrency.rs @@ -7,18 +7,17 @@ use std::thread; use std::time::{Duration, Instant}; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; -use ferrite::storage::disk::async_disk::cache::fifo::InstrumentedFifoCache; +use ferrite::storage::disk::async_disk::cache::fifo::FIFOCache; mod thread_safe_wrapper { use super::*; // Helper type for thread-safe testing - type ThreadSafeInstrumentedFifoCache = Arc>>; + type ThreadSafeFIFOCache = Arc>>; #[test] fn test_basic_thread_safe_operations() { - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(100))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(100))); let num_threads = 8; let operations_per_thread = 250; let success_count = Arc::new(AtomicUsize::new(0)); @@ -114,8 +113,7 @@ mod thread_safe_wrapper { #[test] fn test_read_heavy_workload() { - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(200))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(200))); let num_reader_threads = 12; let num_writer_threads = 2; let reads_per_thread = 500; @@ -228,8 +226,7 @@ mod thread_safe_wrapper { #[test] fn test_write_heavy_workload() { - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(150))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(150))); let num_threads = 10; let writes_per_thread = 200; let total_writes = Arc::new(AtomicUsize::new(0)); @@ -312,8 +309,7 @@ mod thread_safe_wrapper { #[test] fn test_mixed_operations_concurrency() { - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(100))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(100))); let num_threads = 16; let operations_per_thread = 
150; let operation_counts = Arc::new(AtomicUsize::new(0)); @@ -418,8 +414,7 @@ mod thread_safe_wrapper { #[test] fn test_deadlock_prevention() { - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(50))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(50))); let num_threads = 20; let timeout_duration = Duration::from_secs(10); let start_time = Instant::now(); @@ -489,8 +484,7 @@ mod thread_safe_wrapper { #[test] fn test_fairness_across_threads() { - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(80))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(80))); let num_threads = 8; let target_operations = 200; let test_duration = Duration::from_secs(5); @@ -576,13 +570,12 @@ mod stress_testing { use super::*; // Helper type for thread-safe testing - type ThreadSafeInstrumentedFifoCache = Arc>>; + type ThreadSafeFIFOCache = Arc>>; #[test] fn test_high_contention_scenario() { // Many threads accessing same small set of keys - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(50))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(50))); let num_threads = 20; let operations_per_thread = 500; let hot_keys = 10; // Small set of hotly contested keys @@ -663,8 +656,7 @@ mod stress_testing { #[test] fn test_cache_thrashing_scenario() { // Rapid insertions causing constant evictions (cache thrashing) - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(100))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(100))); let num_threads = 15; let operations_per_thread = 300; let key_space_multiplier = 10; // 10x more keys than capacity @@ -756,8 +748,7 @@ mod stress_testing { #[test] fn test_long_running_stability() { // Verify stability over extended periods with continuous load - let cache: ThreadSafeInstrumentedFifoCache 
= - Arc::new(Mutex::new(InstrumentedFifoCache::new(200))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(200))); let num_threads = 8; let test_duration = Duration::from_secs(15); // Extended test let stability_check_interval = Duration::from_secs(3); @@ -888,8 +879,8 @@ mod stress_testing { fn test_memory_pressure_scenario() { // Test behavior with large cache and memory-intensive operations let large_capacity = 5000; - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(large_capacity))); + let cache: ThreadSafeFIFOCache = + Arc::new(Mutex::new(FIFOCache::new(large_capacity))); let num_threads = 12; let operations_per_thread = 500; @@ -971,8 +962,7 @@ mod stress_testing { #[test] fn test_rapid_thread_creation_destruction() { // Test with threads being created and destroyed rapidly - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(150))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(150))); let num_thread_waves = 20; let threads_per_wave = 10; let operations_per_thread = 50; @@ -1053,8 +1043,7 @@ mod stress_testing { #[test] fn test_burst_load_handling() { // Test handling of sudden burst loads - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(300))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(300))); let burst_threads = 25; let operations_per_burst_thread = 100; let background_threads = 5; @@ -1180,8 +1169,7 @@ mod stress_testing { #[test] fn test_gradual_load_increase() { // Test behavior as load gradually increases - let cache: ThreadSafeInstrumentedFifoCache = - Arc::new(Mutex::new(InstrumentedFifoCache::new(200))); + let cache: ThreadSafeFIFOCache = Arc::new(Mutex::new(FIFOCache::new(200))); let max_threads = 20; let operations_per_thread = 100; let ramp_up_steps = 10; From d002952bdd8276ed9e6eaab8c7ca30e8dc93eb60 Mon Sep 17 00:00:00 2001 From: Thomas 
Korrison Date: Tue, 27 Jan 2026 14:23:55 +0000 Subject: [PATCH 3/4] Update Cargo.toml keywords and enhance FIFO cache tests with basic metrics validation. Also, ensure file creation in async disk I/O tests. --- Cargo.toml | 2 +- src/storage/disk/async_disk/cache/fifo.rs | 23 ++++++++++++++++++++--- src/storage/disk/async_disk/io/io_impl.rs | 4 ++++ 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 85716c86..5a43ddb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ edition = "2024" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/ferritedb/ferrite" -keywords = ["database", "dbms", "rust", "sql", "storage-engine"] +keywords = ["database", "dbms", "sql", "storage-engine", "oltp"] categories = ["database-implementations", "asynchronous", "concurrency"] [dependencies] diff --git a/src/storage/disk/async_disk/cache/fifo.rs b/src/storage/disk/async_disk/cache/fifo.rs index c759ef31..193c7ad9 100644 --- a/src/storage/disk/async_disk/cache/fifo.rs +++ b/src/storage/disk/async_disk/cache/fifo.rs @@ -684,10 +684,9 @@ where #[cfg(test)] mod tests { - use std::collections::HashSet; - - use super::*; use crate::storage::disk::async_disk::cache::cache_traits::{CoreCache, FIFOCacheTrait}; + use crate::storage::disk::async_disk::cache::fifo::FIFOCache; + use std::collections::HashSet; // Basic FIFO Behavior Tests mod basic_behavior { @@ -874,6 +873,24 @@ mod tests { assert_eq!(cache.peek_oldest(), None); assert_eq!(cache.pop_oldest(), None); } + + #[test] + fn test_basic_fifo_metrics() { + let mut cache = FIFOCache::new(3); + + // Test basic insertion and retrieval + assert_eq!(cache.insert("key1", "value1"), None); + assert_eq!(cache.insert("key2", "value2"), None); + assert_eq!(cache.insert("key3", "value3"), None); + + assert_eq!(cache.get(&"key1"), Some(&"value1")); + assert_eq!(cache.get(&"key2"), Some(&"value2")); + assert_eq!(cache.get(&"key3"), Some(&"value3")); + assert_eq!(cache.len(), 
3); + + assert_eq!(cache.metrics.insert_new, 3); + assert_eq!(cache.metrics.get_calls, 3); + } } // Edge Cases Tests diff --git a/src/storage/disk/async_disk/io/io_impl.rs b/src/storage/disk/async_disk/io/io_impl.rs index 51c707a4..1773e43d 100644 --- a/src/storage/disk/async_disk/io/io_impl.rs +++ b/src/storage/disk/async_disk/io/io_impl.rs @@ -682,6 +682,8 @@ mod tests { File::options() .read(true) .write(true) + .create(true) + .truncate(true) .open(&db_path) .await .unwrap(), @@ -690,6 +692,8 @@ mod tests { File::options() .read(true) .write(true) + .create(true) + .truncate(true) .open(&log_path) .await .unwrap(), From d839b5cbeb7af4b19e78bdcf1430c76e78b31785 Mon Sep 17 00:00:00 2001 From: Thomas Korrison Date: Tue, 27 Jan 2026 15:28:39 +0000 Subject: [PATCH 4/4] Update dependencies in Cargo.lock and add patch for stacker to address Windows compilation issues. Enhance test configurations to ignore certain tests under Miri. --- Cargo.lock | 16 +++++++--------- Cargo.toml | 7 +++++++ tests/concurrency/concurrent_transactions.rs | 3 +++ tests/execution/engine_group_by.rs | 1 + tests/execution/engine_join.rs | 1 + tests/recovery/db_instance_recovery.rs | 1 + tests/storage/fifo_concurrency.rs | 2 ++ tests/storage/lfu_concurrency.rs | 1 + tests/storage/lfu_performance.rs | 2 ++ tests/storage/lru_concurrency.rs | 9 +++++---- tests/storage/lru_k_concurrency.rs | 9 +++++---- tests/storage/lru_k_performance.rs | 1 + tests/storage/lru_performance.rs | 1 + 13 files changed, 37 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21344c24..9d8b6185 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -87,9 +87,9 @@ dependencies = [ [[package]] name = "ar_archive_writer" -version = "0.2.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c269894b6fe5e9d7ada0cf69b5bf847ff35bc25fc271f08e1d080fce80339a" +checksum = "7eb93bbb63b9c227414f6eb3a0adfddca591a8ce1e9b60661bb08969b87e340b" dependencies = [ "object", ] @@ 
-1070,9 +1070,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] @@ -1273,9 +1273,8 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11f2fedc3b7dafdc2851bc52f277377c5473d378859be234bc7ebb593144d01" +version = "0.1.29" +source = "git+https://github.com/rust-lang/stacker#017540a8f1f151e6c2e48422571402fbe43d0edb" dependencies = [ "ar_archive_writer", "cc", @@ -1656,8 +1655,7 @@ dependencies = [ [[package]] name = "stacker" version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1f8b29fb42aafcea4edeeb6b2f2d7ecd0d969c48b4cf0d2e64aafc471dd6e59" +source = "git+https://github.com/rust-lang/stacker#017540a8f1f151e6c2e48422571402fbe43d0edb" dependencies = [ "cc", "cfg-if", diff --git a/Cargo.toml b/Cargo.toml index 5a43ddb5..e1e6b450 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -162,3 +162,10 @@ mocking = ["dep:mockall"] # Development and testing features dev = ["server", "mocking"] full = ["cli", "client", "server"] + +# Patch section to fix Windows compilation issues +# Issue: https://github.com/rust-lang/stacker/issues/87 (fixed but not yet released) +[patch.crates-io] +# Override stacker 0.1.22 which has a broken Windows backend +# Using a commit with the fix until a new version is published +stacker = { git = "https://github.com/rust-lang/stacker" } diff --git a/tests/concurrency/concurrent_transactions.rs b/tests/concurrency/concurrent_transactions.rs index e13ed1d3..fb3c7115 100644 --- a/tests/concurrency/concurrent_transactions.rs +++ b/tests/concurrency/concurrent_transactions.rs @@ -107,6 +107,7 @@ impl ConcurrentTestContext { } } 
+#[cfg_attr(miri, ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_concurrent_inserts() { let ctx = Arc::new(ConcurrentTestContext::new("concurrent_inserts").await); @@ -183,6 +184,7 @@ async fn test_concurrent_inserts() { ctx.transaction_factory.commit_transaction(txn_ctx).await; } +#[cfg_attr(miri, ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_concurrent_updates() { let ctx = Arc::new(ConcurrentTestContext::new("concurrent_updates").await); @@ -260,6 +262,7 @@ async fn test_concurrent_updates() { ); } +#[cfg_attr(miri, ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_deadlock_detection() { let ctx = Arc::new(ConcurrentTestContext::new("deadlock_detection").await); diff --git a/tests/execution/engine_group_by.rs b/tests/execution/engine_group_by.rs index da2655e7..66f6f969 100644 --- a/tests/execution/engine_group_by.rs +++ b/tests/execution/engine_group_by.rs @@ -620,6 +620,7 @@ async fn test_group_by_empty_table() { assert_eq!(schema.get_columns()[1].get_name(), "count"); } +#[cfg_attr(miri, ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 5)] async fn test_group_by_performance_large_dataset() { init_test_logger(); diff --git a/tests/execution/engine_join.rs b/tests/execution/engine_join.rs index 1c0d9e68..5708b461 100644 --- a/tests/execution/engine_join.rs +++ b/tests/execution/engine_join.rs @@ -219,6 +219,7 @@ async fn test_full_outer_join_operations() { assert!(success, "Full outer join query execution failed"); } +#[cfg_attr(miri, ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore] // Marked as ignore in original implementation async fn test_cross_join_operations() { diff --git a/tests/recovery/db_instance_recovery.rs b/tests/recovery/db_instance_recovery.rs index 9973802c..7929a1f8 100644 --- a/tests/recovery/db_instance_recovery.rs +++ b/tests/recovery/db_instance_recovery.rs @@ -5,6 +5,7 @@ use 
ferrite::concurrency::transaction::IsolationLevel; use crate::common::logger::init_test_logger; use crate::common::tempdb::temp_db_config; +#[cfg_attr(miri, ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn db_instance_recovery_integration() { init_test_logger(); diff --git a/tests/storage/fifo_concurrency.rs b/tests/storage/fifo_concurrency.rs index 740ce3e4..0e7fe406 100644 --- a/tests/storage/fifo_concurrency.rs +++ b/tests/storage/fifo_concurrency.rs @@ -1,6 +1,8 @@ // ============================================== // FIFO CONCURRENCY TESTS (integration) // ============================================== +#![cfg(not(miri))] + use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; diff --git a/tests/storage/lfu_concurrency.rs b/tests/storage/lfu_concurrency.rs index 8a220f04..58bc4396 100644 --- a/tests/storage/lfu_concurrency.rs +++ b/tests/storage/lfu_concurrency.rs @@ -1,6 +1,7 @@ // ============================================== // LFU CONCURRENCY TESTS (integration) // ============================================== +#![cfg(not(miri))] use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; diff --git a/tests/storage/lfu_performance.rs b/tests/storage/lfu_performance.rs index 0a0e494d..fd9fe5a9 100644 --- a/tests/storage/lfu_performance.rs +++ b/tests/storage/lfu_performance.rs @@ -1,6 +1,8 @@ // ============================================== // LFU PERFORMANCE TESTS (integration) // ============================================== +#![cfg(not(miri))] + use std::time::{Duration, Instant}; use ferrite::storage::disk::async_disk::cache::cache_traits::{ diff --git a/tests/storage/lru_concurrency.rs b/tests/storage/lru_concurrency.rs index 08a5aa4c..373006ee 100644 --- a/tests/storage/lru_concurrency.rs +++ b/tests/storage/lru_concurrency.rs @@ -1,3 +1,8 @@ +// ============================================== +// LRU CONCURRENCY TESTS (integration) +// 
============================================== +#![cfg(not(miri))] + use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::thread; @@ -5,10 +10,6 @@ use std::time::{Duration, Instant}; use ferrite::storage::disk::async_disk::cache::lru::ConcurrentLRUCache; -// ============================================== -// LRU CONCURRENCY TESTS (integration) -// ============================================== - mod thread_safety { use super::*; diff --git a/tests/storage/lru_k_concurrency.rs b/tests/storage/lru_k_concurrency.rs index e9557730..aa4b3c1e 100644 --- a/tests/storage/lru_k_concurrency.rs +++ b/tests/storage/lru_k_concurrency.rs @@ -1,3 +1,8 @@ +// ============================================== +// LRU-K CONCURRENCY TESTS (integration) +// ============================================== +#![cfg(not(miri))] + use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; @@ -6,10 +11,6 @@ use std::time::Duration; use ferrite::storage::disk::async_disk::cache::cache_traits::{CoreCache, LRUKCacheTrait}; use ferrite::storage::disk::async_disk::cache::lru_k::LRUKCache; -// ============================================== -// LRU-K CONCURRENCY TESTS (integration) -// ============================================== - // Thread Safety Tests mod thread_safety { use super::*; diff --git a/tests/storage/lru_k_performance.rs b/tests/storage/lru_k_performance.rs index 00d1a595..f1222316 100644 --- a/tests/storage/lru_k_performance.rs +++ b/tests/storage/lru_k_performance.rs @@ -1,6 +1,7 @@ // ============================================== // LRU-K PERFORMANCE TESTS (integration) // ============================================== +#![cfg(not(miri))] // Lookup Performance Tests mod lookup_performance { diff --git a/tests/storage/lru_performance.rs b/tests/storage/lru_performance.rs index a51ce68c..91daab85 100644 --- a/tests/storage/lru_performance.rs +++ b/tests/storage/lru_performance.rs @@ -1,6 +1,7 @@ // 
============================================== // LRU PERFORMANCE TESTS (integration) // ============================================== +#![cfg(not(miri))] mod lookup_performance { #[test]