diff --git a/AspNetCore.sln b/AspNetCore.sln
index 296a8be79925..a19c1fb94c04 100644
--- a/AspNetCore.sln
+++ b/AspNetCore.sln
@@ -1796,6 +1796,9 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Extensions.Cachin
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Extensions.Caching.Hybrid.Tests", "src\Caching\Hybrid\test\Microsoft.Extensions.Caching.Hybrid.Tests.csproj", "{CF63C942-895A-4F6B-888A-7653D7C4991A}"
EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "MicroBenchmarks", "MicroBenchmarks", "{6469F11E-8CEE-4292-820B-324DFFC88EBC}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Extensions.Caching.MicroBenchmarks", "src\Caching\perf\MicroBenchmarks\Microsoft.Extensions.Caching.MicroBenchmarks\Microsoft.Extensions.Caching.MicroBenchmarks.csproj", "{8D2CC6ED-5105-4F52-8757-C21F4DE78589}"
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "perf", "perf", "{9DC6B242-457B-4767-A84B-C3D23B76C642}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Microsoft.AspNetCore.OpenApi.Microbenchmarks", "src\OpenApi\perf\Microbenchmarks\Microsoft.AspNetCore.OpenApi.Microbenchmarks.csproj", "{D53F0EF7-0CDC-49B4-AA2D-229901B0A734}"
@@ -10849,6 +10852,22 @@ Global
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x64.Build.0 = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x86.ActiveCfg = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x86.Build.0 = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|arm64.ActiveCfg = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|arm64.Build.0 = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|x64.Build.0 = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|x86.Build.0 = Debug|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|Any CPU.Build.0 = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|arm64.ActiveCfg = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|arm64.Build.0 = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|x64.ActiveCfg = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|x64.Build.0 = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|x86.ActiveCfg = Release|Any CPU
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Release|x86.Build.0 = Release|Any CPU
{D53F0EF7-0CDC-49B4-AA2D-229901B0A734}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{D53F0EF7-0CDC-49B4-AA2D-229901B0A734}.Debug|Any CPU.Build.0 = Debug|Any CPU
{D53F0EF7-0CDC-49B4-AA2D-229901B0A734}.Debug|arm64.ActiveCfg = Debug|Any CPU
@@ -11752,6 +11771,8 @@ Global
{2D64CA23-6E81-488E-A7D3-9BDF87240098} = {0F39820F-F4A5-41C6-9809-D79B68F032EF}
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9} = {2D64CA23-6E81-488E-A7D3-9BDF87240098}
{CF63C942-895A-4F6B-888A-7653D7C4991A} = {2D64CA23-6E81-488E-A7D3-9BDF87240098}
+ {6469F11E-8CEE-4292-820B-324DFFC88EBC} = {0F39820F-F4A5-41C6-9809-D79B68F032EF}
+ {8D2CC6ED-5105-4F52-8757-C21F4DE78589} = {6469F11E-8CEE-4292-820B-324DFFC88EBC}
{9DC6B242-457B-4767-A84B-C3D23B76C642} = {2299CCD8-8F9C-4F2B-A633-9BF4DA81022B}
{D53F0EF7-0CDC-49B4-AA2D-229901B0A734} = {9DC6B242-457B-4767-A84B-C3D23B76C642}
EndGlobalSection
diff --git a/src/Caching/Caching.slnf b/src/Caching/Caching.slnf
index 63610b8e28d5..522505d8ab90 100644
--- a/src/Caching/Caching.slnf
+++ b/src/Caching/Caching.slnf
@@ -8,6 +8,7 @@
"src\\Caching\\SqlServer\\test\\Microsoft.Extensions.Caching.SqlServer.Tests.csproj",
"src\\Caching\\StackExchangeRedis\\src\\Microsoft.Extensions.Caching.StackExchangeRedis.csproj",
"src\\Caching\\StackExchangeRedis\\test\\Microsoft.Extensions.Caching.StackExchangeRedis.Tests.csproj",
+ "src\\Caching\\perf\\MicroBenchmarks\\Microsoft.Extensions.Caching.MicroBenchmarks\\Microsoft.Extensions.Caching.MicroBenchmarks.csproj",
"src\\Middleware\\OutputCaching\\src\\Microsoft.AspNetCore.OutputCaching.csproj"
]
}
diff --git a/src/Caching/Hybrid/src/HybridCacheOptions.cs b/src/Caching/Hybrid/src/HybridCacheOptions.cs
index 62407b9bf6a9..66793746a5dd 100644
--- a/src/Caching/Hybrid/src/HybridCacheOptions.cs
+++ b/src/Caching/Hybrid/src/HybridCacheOptions.cs
@@ -6,14 +6,17 @@
using System.Linq;
using System.Text;
using System.Threading.Tasks;
+using Microsoft.Extensions.Options;
namespace Microsoft.Extensions.Caching.Hybrid;
///
/// Options for configuring the default implementation.
///
-public class HybridCacheOptions
+public class HybridCacheOptions // : IOptions<HybridCacheOptions>
{
+ // TODO: should we implement IOptions?
+
///
/// Default global options to be applied to operations; if options are
/// specified at the individual call level, the non-null values are merged (with the per-call
@@ -45,4 +48,6 @@ public class HybridCacheOptions
/// tags do not contain data that should not be visible in metrics systems.
///
public bool ReportTagMetrics { get; set; }
+
+ // HybridCacheOptions IOptions<HybridCacheOptions>.Value => this;
}
diff --git a/src/Caching/Hybrid/src/HybridCacheServiceExtensions.cs b/src/Caching/Hybrid/src/HybridCacheServiceExtensions.cs
index bcbde7462a39..fcb7e4ae4d57 100644
--- a/src/Caching/Hybrid/src/HybridCacheServiceExtensions.cs
+++ b/src/Caching/Hybrid/src/HybridCacheServiceExtensions.cs
@@ -50,7 +50,6 @@ public static IHybridCacheBuilder AddHybridCache(this IServiceCollection service
services.TryAddSingleton(TimeProvider.System);
services.AddOptions();
services.AddMemoryCache();
- services.AddDistributedMemoryCache(); // we need a backend; use in-proc by default
services.TryAddSingleton();
services.TryAddSingleton<IHybridCacheSerializer<string>>(InbuiltTypeSerializer.Instance);
services.TryAddSingleton<IHybridCacheSerializer<byte[]>>(InbuiltTypeSerializer.Instance);
diff --git a/src/Caching/Hybrid/src/Internal/BufferChunk.cs b/src/Caching/Hybrid/src/Internal/BufferChunk.cs
new file mode 100644
index 000000000000..c783810fcf30
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/BufferChunk.cs
@@ -0,0 +1,77 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Buffers;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+// used to convey buffer status; like ArraySegment, but Offset is always
+// zero, and we use the MSB of the length to track whether or not to recycle this value
+internal readonly struct BufferChunk
+{
+ private const int MSB = (1 << 31);
+
+ private readonly int _lengthAndPoolFlag;
+ public byte[]? Array { get; } // null for default
+
+ public int Length => _lengthAndPoolFlag & ~MSB;
+
+ public bool ReturnToPool => (_lengthAndPoolFlag & MSB) != 0;
+
+ public byte[] ToArray()
+ {
+ var length = Length;
+ if (length == 0)
+ {
+ return [];
+ }
+
+ var copy = new byte[length];
+ Buffer.BlockCopy(Array!, 0, copy, 0, length);
+ return copy;
+ }
+
+ public BufferChunk(byte[] array)
+ {
+ Debug.Assert(array is not null, "expected valid array input");
+ Array = array;
+ _lengthAndPoolFlag = array.Length;
+ // assume not pooled, if exact-sized
+ Debug.Assert(!ReturnToPool, "do not return right-sized arrays");
+ Debug.Assert(Length == array.Length, "array length not respected");
+ }
+
+ public BufferChunk(byte[] array, int length, bool returnToPool)
+ {
+ Debug.Assert(array is not null, "expected valid array input");
+ Debug.Assert(length >= 0, "expected valid length");
+ Array = array;
+ _lengthAndPoolFlag = length | (returnToPool ? MSB : 0);
+ Debug.Assert(ReturnToPool == returnToPool, "return-to-pool not respected");
+ Debug.Assert(Length == length, "length not respected");
+ }
+
+ internal void RecycleIfAppropriate()
+ {
+ if (ReturnToPool)
+ {
+ ArrayPool<byte>.Shared.Return(Array!);
+ }
+ Unsafe.AsRef(in this) = default; // anti foot-shotgun double-return guard; not 100%, but worth doing
+ Debug.Assert(Array is null && !ReturnToPool, "expected clean slate after recycle");
+ }
+
+ internal ReadOnlySequence<byte> AsSequence() => Length == 0 ? default : new ReadOnlySequence<byte>(Array!, 0, Length);
+
+ internal BufferChunk DoNotReturnToPool()
+ {
+ var copy = this;
+ Unsafe.AsRef(in copy._lengthAndPoolFlag) &= ~MSB;
+ Debug.Assert(copy.Length == Length, "same length expected");
+ Debug.Assert(!copy.ReturnToPool, "do not return to pool");
+ return copy;
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.CacheItem.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.CacheItem.cs
new file mode 100644
index 000000000000..7dba0f76d6be
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.CacheItem.cs
@@ -0,0 +1,49 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using Microsoft.Extensions.Caching.Memory;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ internal abstract class CacheItem
+ {
+ internal static readonly PostEvictionDelegate _sharedOnEviction = static (key, value, reason, state) =>
+ {
+ if (value is CacheItem item)
+ {
+ // perform per-item clean-up; this could be buffer recycling (if defensive copies needed),
+ // or could be disposal
+ item.OnEviction();
+ }
+ };
+
+ public virtual void Release() { } // for recycling purposes
+
+ public abstract bool NeedsEvictionCallback { get; } // do we need to call Release when evicted?
+
+ public virtual void OnEviction() { } // only invoked if NeedsEvictionCallback reported true
+
+ public abstract bool TryReserveBuffer(out BufferChunk buffer);
+
+ public abstract bool DebugIsImmutable { get; }
+ }
+
+ internal abstract class CacheItem<T> : CacheItem
+ {
+ public abstract bool TryGetValue(out T value);
+
+ public T GetValue()
+ {
+ if (!TryGetValue(out var value))
+ {
+ Throw();
+ }
+ return value;
+
+ static void Throw() => throw new ObjectDisposedException("The cache item has been recycled before the value was obtained");
+ }
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Debug.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Debug.cs
new file mode 100644
index 000000000000..0be6f768e5f4
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Debug.cs
@@ -0,0 +1,70 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+using System.Threading;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ internal bool DebugTryGetCacheItem(string key, [NotNullWhen(true)] out CacheItem? value)
+ {
+ if (_localCache.TryGetValue(key, out var untyped) && untyped is CacheItem typed)
+ {
+ value = typed;
+ return true;
+ }
+ value = null;
+ return false;
+ }
+
+#if DEBUG // enable ref-counted buffers
+
+ private int _outstandingBufferCount;
+
+ internal int DebugGetOutstandingBuffers(bool flush = false)
+ => flush ? Interlocked.Exchange(ref _outstandingBufferCount, 0) : Volatile.Read(ref _outstandingBufferCount);
+
+ [Conditional("DEBUG")]
+ internal void DebugDecrementOutstandingBuffers()
+ {
+ Interlocked.Decrement(ref _outstandingBufferCount);
+ }
+
+ [Conditional("DEBUG")]
+ internal void DebugIncrementOutstandingBuffers()
+ {
+ Interlocked.Increment(ref _outstandingBufferCount);
+ }
+#endif
+
+ partial class MutableCacheItem<T>
+ {
+ partial void DebugDecrementOutstandingBuffers();
+ partial void DebugTrackBufferCore(DefaultHybridCache cache);
+
+ [Conditional("DEBUG")]
+ internal void DebugTrackBuffer(DefaultHybridCache cache) => DebugTrackBufferCore(cache);
+
+#if DEBUG
+ private DefaultHybridCache? _cache; // for buffer-tracking - only enabled in DEBUG
+ partial void DebugDecrementOutstandingBuffers()
+ {
+ if (_buffer.ReturnToPool)
+ {
+ _cache?.DebugDecrementOutstandingBuffers();
+ }
+ }
+ partial void DebugTrackBufferCore(DefaultHybridCache cache)
+ {
+ _cache = cache;
+ if (_buffer.ReturnToPool)
+ {
+ _cache?.DebugIncrementOutstandingBuffers();
+ }
+ }
+#endif
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.ImmutableCacheItem.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.ImmutableCacheItem.cs
new file mode 100644
index 000000000000..f87335674b95
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.ImmutableCacheItem.cs
@@ -0,0 +1,45 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ private sealed class ImmutableCacheItem<T> : CacheItem<T> // used to hold types that do not require defensive copies
+ {
+ private readonly T _value;
+ public ImmutableCacheItem(T value) => _value = value;
+
+ private static ImmutableCacheItem<T>? _sharedDefault;
+
+ // this is only used when the underlying store is disabled; we don't need 100% singleton; "good enough is"
+ public static ImmutableCacheItem<T> Default => _sharedDefault ??= new(default!);
+
+ public override void OnEviction()
+ {
+ var obj = _value as IDisposable;
+ Debug.Assert(obj is not null, "shouldn't be here for non-disposable types");
+ obj?.Dispose();
+ }
+
+ public override bool NeedsEvictionCallback => ImmutableTypeCache<T>.IsDisposable;
+
+ public override bool TryGetValue(out T value)
+ {
+ value = _value;
+ return true; // always available
+ }
+
+ public override bool TryReserveBuffer(out BufferChunk buffer)
+ {
+ buffer = default;
+ return false; // we don't have one to reserve!
+ }
+
+ public override bool DebugIsImmutable => true;
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.L2.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.L2.cs
new file mode 100644
index 000000000000..aef445d44e29
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.L2.cs
@@ -0,0 +1,125 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Memory;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ internal ValueTask<BufferChunk> GetFromL2Async(string key, CancellationToken token)
+ {
+ switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
+ {
+ case CacheFeatures.BackendCache: // legacy byte[]-based
+ var pendingLegacy = _backendCache!.GetAsync(key, token);
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ if (!pendingLegacy.IsCompletedSuccessfully)
+#else
+ if (pendingLegacy.Status != TaskStatus.RanToCompletion)
+#endif
+ {
+ return new(AwaitedLegacy(pendingLegacy, this));
+ }
+ return new(GetValidPayloadSegment(pendingLegacy.Result)); // already complete
+ case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // IBufferWriter-based
+ var writer = RecyclableArrayBufferWriter<byte>.Create(MaximumPayloadBytes);
+ var cache = Unsafe.As<IBufferDistributedCache>(_backendCache!); // type-checked already
+ var pendingBuffers = cache.TryGetAsync(key, writer, token);
+ if (!pendingBuffers.IsCompletedSuccessfully)
+ {
+ return new(AwaitedBuffers(pendingBuffers, writer));
+ }
+ BufferChunk result = pendingBuffers.GetAwaiter().GetResult()
+ ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
+ : default;
+ writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
+ return new(result);
+ }
+ return default;
+
+ static async Task<BufferChunk> AwaitedLegacy(Task<byte[]?> pending, DefaultHybridCache @this)
+ {
+ var bytes = await pending.ConfigureAwait(false);
+ return @this.GetValidPayloadSegment(bytes);
+ }
+
+ static async Task<BufferChunk> AwaitedBuffers(ValueTask<bool> pending, RecyclableArrayBufferWriter<byte> writer)
+ {
+ BufferChunk result = await pending.ConfigureAwait(false)
+ ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
+ : default;
+ writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
+ return result;
+ }
+ }
+
+ private BufferChunk GetValidPayloadSegment(byte[]? payload)
+ {
+ if (payload is not null)
+ {
+ if (payload.Length > MaximumPayloadBytes)
+ {
+ ThrowPayloadLengthExceeded(payload.Length);
+ }
+ return new(payload);
+ }
+ return default;
+ }
+
+ [DoesNotReturn, MethodImpl(MethodImplOptions.NoInlining)]
+ private void ThrowPayloadLengthExceeded(int size) // splitting the exception bits out to a different method
+ {
+ // TODO: also log to logger (hence instance method)
+ throw new InvalidOperationException($"Maximum cache length ({MaximumPayloadBytes} bytes) exceeded");
+ }
+
+ internal ValueTask SetL2Async(string key, in BufferChunk buffer, HybridCacheEntryOptions? options, CancellationToken token)
+ {
+ Debug.Assert(buffer.Array is not null);
+ switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
+ {
+ case CacheFeatures.BackendCache: // legacy byte[]-based
+ var arr = buffer.Array;
+ if (arr.Length != buffer.Length)
+ {
+ // we'll need a right-sized snapshot
+ arr = buffer.ToArray();
+ }
+ return new(_backendCache!.SetAsync(key, arr, GetOptions(options), token));
+ case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // ReadOnlySequence-based
+ var cache = Unsafe.As<IBufferDistributedCache>(_backendCache!); // type-checked already
+ return cache.SetAsync(key, buffer.AsSequence(), GetOptions(options), token);
+ }
+ return default;
+ }
+
+ private DistributedCacheEntryOptions GetOptions(HybridCacheEntryOptions? options)
+ {
+ DistributedCacheEntryOptions? result = null;
+ if (options is not null && options.Expiration.HasValue && options.Expiration.GetValueOrDefault() != _defaultExpiration)
+ {
+ result = options.ToDistributedCacheEntryOptions();
+ }
+ return result ?? _defaultDistributedCacheExpiration;
+ }
+
+ internal void SetL1(string key, CacheItem value, HybridCacheEntryOptions? options)
+ {
+ // based on CacheExtensions.Set, but with post-eviction recycling
+ using var cacheEntry = _localCache.CreateEntry(key);
+ cacheEntry.AbsoluteExpirationRelativeToNow = options?.LocalCacheExpiration ?? _defaultLocalCacheExpiration;
+ cacheEntry.Value = value;
+ if (value.NeedsEvictionCallback)
+ {
+ cacheEntry.RegisterPostEvictionCallback(CacheItem._sharedOnEviction);
+ }
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.MutableCacheItem.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.MutableCacheItem.cs
new file mode 100644
index 000000000000..ef99a5738091
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.MutableCacheItem.cs
@@ -0,0 +1,100 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ private sealed partial class MutableCacheItem<T> : CacheItem<T> // used to hold types that require defensive copies
+ {
+ private readonly IHybridCacheSerializer<T> _serializer;
+ private readonly BufferChunk _buffer;
+ private int _refCount = 1; // buffer released when this becomes zero
+
+ public MutableCacheItem(ref BufferChunk buffer, IHybridCacheSerializer<T> serializer)
+ {
+ _serializer = serializer;
+ _buffer = buffer;
+ buffer = default; // we're taking over the lifetime; the caller no longer has it!
+ }
+
+ public MutableCacheItem(T value, IHybridCacheSerializer<T> serializer, int maxLength)
+ {
+ _serializer = serializer;
+ var writer = RecyclableArrayBufferWriter<byte>.Create(maxLength);
+ serializer.Serialize(value, writer);
+
+ _buffer = new(writer.DetachCommitted(out var length), length, returnToPool: true);
+ writer.Dispose(); // no buffers left (we just detached them), but just in case of other logic
+ }
+
+ public override bool NeedsEvictionCallback => _buffer.ReturnToPool;
+
+ public override void OnEviction() => Release();
+
+ public override void Release()
+ {
+ var newCount = Interlocked.Decrement(ref _refCount);
+ if (newCount == 0)
+ {
+ DebugDecrementOutstandingBuffers();
+ _buffer.RecycleIfAppropriate();
+ }
+ }
+
+ public bool TryReserve()
+ {
+ var oldValue = Volatile.Read(ref _refCount);
+ do
+ {
+ if (oldValue is 0 or -1)
+ {
+ return false; // already burned, or about to roll around back to zero
+ }
+
+ var updated = Interlocked.CompareExchange(ref _refCount, oldValue + 1, oldValue);
+ if (updated == oldValue)
+ {
+ return true; // we exchanged
+ }
+ oldValue = updated; // we failed, but we have an updated state
+ } while (true);
+ }
+
+ public override bool TryGetValue(out T value)
+ {
+ if (!TryReserve()) // only if we haven't already burned
+ {
+ value = default!;
+ return false;
+ }
+
+ try
+ {
+ value = _serializer.Deserialize(_buffer.AsSequence());
+ return true;
+ }
+ finally
+ {
+ Release();
+ }
+ }
+
+ public override bool TryReserveBuffer(out BufferChunk buffer)
+ {
+ if (TryReserve()) // only if we haven't already burned
+ {
+ buffer = _buffer.DoNotReturnToPool(); // not up to them!
+ return true;
+ }
+ buffer = default;
+ return false;
+ }
+
+ public override bool DebugIsImmutable => false;
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Serialization.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Serialization.cs
new file mode 100644
index 000000000000..42b5789e4b86
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Serialization.cs
@@ -0,0 +1,114 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Concurrent;
+using System.ComponentModel;
+using System.Linq;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Serialization;
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+partial class DefaultHybridCache
+{
+ // per instance cache of typed serializers; each serializer is a
+ // IHybridCacheSerializer for the corresponding Type, but we can't
+ // know which here - and undesirable to add an artificial non-generic
+ // IHybridCacheSerializer base that serves no other purpose
+ private readonly ConcurrentDictionary<Type, object> _serializers = new();
+
+ internal int MaximumPayloadBytes { get; }
+
+ internal IHybridCacheSerializer<T> GetSerializer<T>()
+ {
+ return _serializers.TryGetValue(typeof(T), out var serializer)
+ ? Unsafe.As<IHybridCacheSerializer<T>>(serializer) : ResolveAndAddSerializer(this);
+
+ static IHybridCacheSerializer<T> ResolveAndAddSerializer(DefaultHybridCache @this)
+ {
+ // it isn't critical that we get only one serializer instance during start-up; what matters
+ // is that we don't get a new serializer instance *every time*
+ var serializer = @this._services.GetService<IHybridCacheSerializer<T>>();
+ if (serializer is null)
+ {
+ foreach (var factory in @this._serializerFactories)
+ {
+ if (factory.TryCreateSerializer(out var current))
+ {
+ serializer = current;
+ break; // we've already reversed the factories, so: the first hit is what we want
+ }
+ }
+ }
+ if (serializer is null)
+ {
+ throw new InvalidOperationException($"No {nameof(IHybridCacheSerializer<T>)} configured for type '{typeof(T).Name}'");
+ }
+ // store the result so we don't repeat this in future
+ @this._serializers[typeof(T)] = serializer;
+ return serializer;
+ }
+ }
+
+ internal static class ImmutableTypeCache<T> // lazy memoize; T doesn't change per cache instance
+ {
+ // note for blittable types: a pure struct will be a full copy every time - nothing shared to mutate
+ public static readonly bool IsImmutable = (typeof(T).IsValueType && IsBlittable<T>()) || IsImmutable(typeof(T));
+
+ public static bool IsDisposable => typeof(IDisposable).IsAssignableFrom(typeof(T));
+ }
+
+ private static bool IsBlittable<T>() // minimize the generic portion
+ {
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ return !RuntimeHelpers.IsReferenceOrContainsReferences<T>();
+#else
+ try // down-level: only blittable types can be pinned
+ {
+ // get a typed, zeroed, non-null boxed instance of the appropriate type
+ // (can't use (object)default(T), as that would box to null for nullable types)
+ var obj = FormatterServices.GetUninitializedObject(Nullable.GetUnderlyingType(typeof(T)) ?? typeof(T));
+ GCHandle.Alloc(obj, GCHandleType.Pinned).Free();
+ return true;
+ }
+ catch
+ {
+ return false;
+ }
+#endif
+ }
+
+ private static bool IsImmutable(Type type)
+ {
+ // check for known types
+ if (type == typeof(string))
+ {
+ return true;
+ }
+
+ if (type.IsValueType)
+ {
+ // switch from Foo? to Foo if necessary
+ if (Nullable.GetUnderlyingType(type) is { } nullable)
+ {
+ type = nullable;
+ }
+ }
+
+ if (type.IsValueType || (type.IsClass & type.IsSealed))
+ {
+ // check for [ImmutableObject(true)]; note we're looking at this as a statement about
+ // the overall nullability; for example, a type could contain a private int[] field,
+ // where the field is mutable and the list is mutable; but if the type is annotated:
+ // we're trusting that the API and use-case is such that the type is immutable
+ return type.GetCustomAttribute() is { Immutable: true };
+ }
+ // don't trust interfaces and non-sealed types; we might have any concrete
+ // type that has different behaviour
+ return false;
+
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Stampede.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Stampede.cs
new file mode 100644
index 000000000000..01d797a08194
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.Stampede.cs
@@ -0,0 +1,97 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Concurrent;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ private readonly ConcurrentDictionary<StampedeKey, StampedeState> _currentOperations = new();
+
+ internal int DebugGetCallerCount(string key, HybridCacheEntryFlags? flags = null)
+ {
+ var stampedeKey = new StampedeKey(key, flags ?? _defaultFlags);
+ return _currentOperations.TryGetValue(stampedeKey, out var state) ? state.DebugCallerCount : 0;
+ }
+
+ // returns true for a new session (in which case: we need to start the work), false for a pre-existing session
+ public bool GetOrCreateStampedeState<TState, T>(string key, HybridCacheEntryFlags flags, out StampedeState<TState, T> stampedeState, bool canBeCanceled)
+ {
+ var stampedeKey = new StampedeKey(key, flags);
+
+ // double-checked locking to try to avoid unnecessary sessions in race conditions,
+ // while avoiding the lock completely whenever possible
+ if (TryJoinExistingSession(this, stampedeKey, out var existing))
+ {
+ stampedeState = existing;
+ return false; // someone ELSE is running the work
+ }
+
+ // most common scenario here, then, is that we're not fighting with anyone else;
+ // go ahead and create a placeholder state object and *try* to add it
+ stampedeState = new StampedeState<TState, T>(this, stampedeKey, canBeCanceled);
+ if (_currentOperations.TryAdd(stampedeKey, stampedeState))
+ {
+ // successfully added; indeed, no-one else was fighting: we're done
+ return true; // the CURRENT caller is responsible for making the work happen
+ }
+
+ // hmm; failed to add - there's concurrent activity on the same key; we're now
+ // in very rare race condition territory; go ahead and take a lock while we
+ // collect our thoughts
+ lock (_currentOperations)
+ {
+ // check again while we hold the lock
+ if (TryJoinExistingSession(this, stampedeKey, out existing))
+ {
+ // we found an existing state we can join; do that
+ stampedeState.SetCanceled(); // to be thorough: mark our speculative one as doomed (no-one has seen it, though)
+ stampedeState = existing; // and replace with the one we found
+ return false; // someone ELSE is running the work
+
+ // note that in this case we allocated a StampedeState that got dropped on
+ // the floor; in the grand scheme of things, that's OK; this is a rare outcome
+ }
+
+ // otherwise, either nothing existed - or the thing that already exists can't be joined;
+ // in that case, go ahead and use the state that we invented a moment ago (outside of the lock)
+ _currentOperations[stampedeKey] = stampedeState;
+ return true; // the CURRENT caller is responsible for making the work happen
+ }
+
+ static bool TryJoinExistingSession(DefaultHybridCache @this, in StampedeKey stampedeKey,
+ [NotNullWhen(true)] out StampedeState<TState, T>? stampedeState)
+ {
+ if (@this._currentOperations.TryGetValue(stampedeKey, out var found))
+ {
+ if (found is not StampedeState<TState, T> tmp)
+ {
+ ThrowWrongType(stampedeKey.Key, found.Type, typeof(T));
+ }
+
+ if (tmp.TryAddCaller())
+ {
+ // we joined an existing session
+ stampedeState = tmp;
+ return true;
+ }
+ }
+ stampedeState = null;
+ return false;
+ }
+
+ [DoesNotReturn]
+ static void ThrowWrongType(string key, Type existingType, Type newType)
+ {
+ Debug.Assert(existingType != newType);
+ throw new InvalidOperationException($"All calls to {nameof(HybridCache)} with the same key should use the same data type; the same key is being used for '{existingType.FullName}' and '{newType.FullName}' data")
+ {
+ Data = { { "CacheKey", key } }
+ };
+ }
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeKey.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeKey.cs
new file mode 100644
index 000000000000..bf5001360eb3
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeKey.cs
@@ -0,0 +1,39 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ internal readonly struct StampedeKey : IEquatable<StampedeKey>
+ {
+ private readonly string _key;
+ private readonly HybridCacheEntryFlags _flags;
+ private readonly int _hashCode; // we know we'll need it; compute it once only
+ public StampedeKey(string key, HybridCacheEntryFlags flags)
+ {
+ _key = key;
+ _flags = flags;
+#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ _hashCode = HashCode.Combine(key, flags);
+#else
+ _hashCode = key.GetHashCode() ^ (int)flags;
+#endif
+ }
+
+ public string Key => _key;
+ public HybridCacheEntryFlags Flags => _flags;
+
+ public bool Equals(StampedeKey other) => _flags == other._flags & _key == other._key;
+
+ public override bool Equals([NotNullWhen(true)] object? obj)
+ => obj is StampedeKey other && Equals(other);
+
+ public override int GetHashCode() => _hashCode;
+
+ public override string ToString() => $"{_key} ({_flags})";
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeState.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeState.cs
new file mode 100644
index 000000000000..f0a983093ac1
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeState.cs
@@ -0,0 +1,107 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Runtime.CompilerServices;
+using System.Threading;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ internal abstract class StampedeState
+#if NETCOREAPP3_0_OR_GREATER
+ : IThreadPoolWorkItem
+#endif
+ {
+ private readonly DefaultHybridCache _cache;
+ private int _activeCallers = 1;
+
+ // because multiple callers can enlist, we need to track when the *last* caller cancels
+ // (and keep going until then); that means we need to run with custom cancellation
+ private readonly CancellationTokenSource? _sharedCancellation;
+ internal readonly CancellationToken SharedToken; // this might have a value even when _sharedCancellation is null
+
+ public StampedeKey Key { get; }
+
+ /// <summary>
+ /// Create a stamped token optionally with shared cancellation support
+ /// </summary>
+ protected StampedeState(DefaultHybridCache cache, in StampedeKey key, bool canBeCanceled)
+ {
+ _cache = cache;
+ Key = key;
+ if (canBeCanceled)
+ {
+ // if the first (or any) caller can't be cancelled; we'll never get to zero; no point tracking
+ // (in reality, all callers usually use the same path, so cancellation is usually "all" or "none")
+ _sharedCancellation = new();
+ SharedToken = _sharedCancellation.Token;
+ }
+ else
+ {
+ SharedToken = CancellationToken.None;
+ }
+ }
+
+ /// <summary>
+ /// Create a stamped token using a fixed cancellation token
+ /// </summary>
+ protected StampedeState(DefaultHybridCache cache, in StampedeKey key, CancellationToken token)
+ {
+ _cache = cache;
+ Key = key;
+ SharedToken = token;
+ }
+
+#if !NETCOREAPP3_0_OR_GREATER
+ protected static readonly WaitCallback SharedWaitCallback = static obj => Unsafe.As<StampedeState>(obj).Execute();
+#endif
+
+ protected DefaultHybridCache Cache => _cache;
+
+ public abstract void Execute();
+
+ protected int MaximumPayloadBytes => _cache.MaximumPayloadBytes;
+
+ public override string ToString() => Key.ToString();
+
+ public abstract void SetCanceled();
+
+ public int DebugCallerCount => Volatile.Read(ref _activeCallers);
+
+ public abstract Type Type { get; }
+
+ public void RemoveCaller()
+ {
+ // note that TryAddCaller has protections to avoid getting back from zero
+ if (Interlocked.Decrement(ref _activeCallers) == 0)
+ {
+ // we're the last to leave; turn off the lights
+ _sharedCancellation?.Cancel();
+ SetCanceled();
+ }
+ }
+
+ public bool TryAddCaller() // essentially just interlocked-increment, but with a leading zero check and overflow detection
+ {
+ var oldValue = Volatile.Read(ref _activeCallers);
+ do
+ {
+ if (oldValue is 0 or -1)
+ {
+ return false; // already burned or about to roll around back to zero
+ }
+
+ var updated = Interlocked.CompareExchange(ref _activeCallers, oldValue + 1, oldValue);
+ if (updated == oldValue)
+ {
+ return true; // we exchanged
+ }
+ oldValue = updated; // we failed, but we have an updated state
+ } while (true);
+ }
+ }
+
+ private void RemoveStampede(StampedeKey key) => _currentOperations.TryRemove(key, out _);
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeStateT.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeStateT.cs
new file mode 100644
index 000000000000..7a76cdcdaf78
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.StampedeStateT.cs
@@ -0,0 +1,264 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Memory;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+partial class DefaultHybridCache
+{
+ internal sealed class StampedeState<TState, T> : StampedeState
+ {
+ private readonly TaskCompletionSource<CacheItem<T>>? _result;
+ private TState? _state;
+ private Func<TState, CancellationToken, ValueTask<T>>? _underlying; // main data factory
+ private HybridCacheEntryOptions? _options;
+ private Task<T>? _sharedUnwrap; // allows multiple non-cancellable callers to share a single task (when no defensive copy needed)
+
+ public StampedeState(DefaultHybridCache cache, in StampedeKey key, bool canBeCanceled)
+ : base(cache, key, canBeCanceled)
+ {
+ _result = new(TaskCreationOptions.RunContinuationsAsynchronously);
+ }
+
+ public override Type Type => typeof(T);
+
+ public StampedeState(DefaultHybridCache cache, in StampedeKey key, CancellationToken token)
+ : base(cache, key, token) { } // no TCS in this case - this is for SetValue only
+
+ public void QueueUserWorkItem(in TState state, Func<TState, CancellationToken, ValueTask<T>> underlying, HybridCacheEntryOptions? options)
+ {
+ Debug.Assert(_underlying is null);
+ Debug.Assert(underlying is not null);
+
+ // initialize the callback state
+ _state = state;
+ _underlying = underlying;
+ _options = options;
+
+#if NETCOREAPP3_0_OR_GREATER
+ ThreadPool.UnsafeQueueUserWorkItem(this, false);
+#else
+ ThreadPool.UnsafeQueueUserWorkItem(SharedWaitCallback, this);
+#endif
+ }
+
+ public Task ExecuteDirectAsync(in TState state, Func<TState, CancellationToken, ValueTask<T>> underlying, HybridCacheEntryOptions? options)
+ {
+ Debug.Assert(_underlying is null);
+ Debug.Assert(underlying is not null);
+
+ // initialize the callback state
+ _state = state;
+ _underlying = underlying;
+ _options = options;
+
+ return BackgroundFetchAsync();
+ }
+
+ public override void Execute() => _ = BackgroundFetchAsync();
+
+ private async Task BackgroundFetchAsync()
+ {
+ try
+ {
+ // read from L2 if appropriate
+ if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheRead) == 0)
+ {
+ var result = await Cache.GetFromL2Async(Key.Key, SharedToken).ConfigureAwait(false);
+
+ if (result.Array is not null)
+ {
+ SetResultAndRecycleIfAppropriate(ref result);
+ return;
+ }
+ }
+
+ // nothing from L2; invoke the underlying data store
+ if ((Key.Flags & HybridCacheEntryFlags.DisableUnderlyingData) == 0)
+ {
+ var cacheItem = SetResult(await _underlying!(_state!, SharedToken).ConfigureAwait(false));
+
+ // note that at this point we've already released most or all of the waiting callers; everything
+ // else here is background
+
+ // write to L2 if appropriate
+ if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheWrite) == 0)
+ {
+ if (cacheItem.TryReserveBuffer(out var buffer))
+ {
+ // mutable: we've already serialized it for the shared cache item
+ await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
+ cacheItem.Release(); // because we reserved
+ }
+ else if (cacheItem.TryGetValue(out var value))
+ {
+ // immutable: we'll need to do the serialize ourselves
+ var writer = RecyclableArrayBufferWriter<byte>.Create(MaximumPayloadBytes); // note this lifetime spans the SetL2Async
+ Cache.GetSerializer<T>().Serialize(value, writer);
+ buffer = new(writer.GetBuffer(out var length), length, returnToPool: false); // writer still owns the buffer
+ await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
+ writer.Dispose(); // recycle on success
+ }
+ }
+ }
+ else
+ {
+ // can't read from data store; implies we shouldn't write
+ // back to anywhere else, either
+ SetDefaultResult();
+ }
+ }
+ catch (Exception ex)
+ {
+ SetException(ex);
+ }
+ }
+
+ public Task<CacheItem<T>> Task
+ {
+ get
+ {
+ Debug.Assert(_result is not null);
+ return _result is null ? Invalid() : _result.Task;
+
+ static Task<CacheItem<T>> Invalid() => System.Threading.Tasks.Task.FromException<CacheItem<T>>(new InvalidOperationException("Task should not be accessed for non-shared instances"));
+ }
+ }
+
+ private void SetException(Exception ex)
+ {
+ if (_result is not null)
+ {
+ Cache.RemoveStampede(Key);
+ _result.TrySetException(ex);
+ }
+ }
+
+ private void SetResult(CacheItem<T> value)
+ {
+ if ((Key.Flags & HybridCacheEntryFlags.DisableLocalCacheWrite) == 0)
+ {
+ Cache.SetL1(Key.Key, value, _options); // we can do this without a TCS, for SetValue
+ }
+
+ if (_result is not null)
+ {
+ Cache.RemoveStampede(Key);
+ _result.TrySetResult(value);
+ }
+ }
+
+ private void SetDefaultResult()
+ {
+ // note we don't store this dummy result in L1 or L2
+ if (_result is not null)
+ {
+ Cache.RemoveStampede(Key);
+ _result.TrySetResult(ImmutableCacheItem<T>.Default);
+ }
+ }
+
+ private void SetResultAndRecycleIfAppropriate(ref BufferChunk value)
+ {
+ // set a result from L2 cache
+ Debug.Assert(value.Array is not null, "expected buffer");
+
+ var serializer = Cache.GetSerializer<T>();
+ CacheItem<T> cacheItem;
+ if (ImmutableTypeCache<T>.IsImmutable)
+ {
+ // deserialize; and store object; buffer can be recycled now
+ cacheItem = new ImmutableCacheItem<T>(serializer.Deserialize(new(value.Array!, 0, value.Length)));
+ value.RecycleIfAppropriate();
+ }
+ else
+ {
+ // use the buffer directly as the backing in the cache-item; do *not* recycle now
+ var tmp = new MutableCacheItem<T>(ref value, serializer);
+ tmp.DebugTrackBuffer(Cache); // conditional: DEBUG
+ cacheItem = tmp;
+ }
+
+ SetResult(cacheItem);
+ }
+
+ private CacheItem<T> SetResult(T value)
+ {
+ // set a result from a value we calculated directly
+ CacheItem<T> cacheItem;
+ if (ImmutableTypeCache<T>.IsImmutable)
+ {
+ cacheItem = new ImmutableCacheItem<T>(value); // no serialize needed
+ }
+ else
+ {
+ var tmp = new MutableCacheItem<T>(value, Cache.GetSerializer<T>(), MaximumPayloadBytes); // serialization happens here
+ tmp.DebugTrackBuffer(Cache); // conditional: DEBUG
+ cacheItem = tmp;
+ }
+ SetResult(cacheItem);
+ return cacheItem;
+ }
+
+ public override void SetCanceled() => _result?.TrySetCanceled(SharedToken);
+
+ internal ValueTask<T> UnwrapAsync()
+ {
+ var task = Task;
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ if (task.IsCompletedSuccessfully)
+#else
+ if (task.Status == TaskStatus.RanToCompletion)
+#endif
+ {
+ return new(task.Result.GetValue());
+ }
+
+ // if the type is immutable, callers can share the final step too
+ var result = ImmutableTypeCache<T>.IsImmutable ? (_sharedUnwrap ??= Awaited(Task)) : Awaited(Task);
+ return new(result);
+
+ static async Task<T> Awaited(Task<CacheItem<T>> task)
+ => (await task.ConfigureAwait(false)).GetValue();
+ }
+
+ public ValueTask<T> JoinAsync(CancellationToken token)
+ {
+ // if the underlying has already completed, and/or our local token can't cancel: we
+ // can simply wrap the shared task; otherwise, we need our own cancellation state
+ return token.CanBeCanceled && !Task.IsCompleted ? WithCancellation(this, token) : UnwrapAsync();
+
+ static async ValueTask<T> WithCancellation(StampedeState<TState, T> stampede, CancellationToken token)
+ {
+ var cancelStub = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
+ using var reg = token.Register(static obj =>
+ {
+ ((TaskCompletionSource<bool>)obj!).TrySetResult(true);
+ }, cancelStub);
+
+ try
+ {
+ var first = await System.Threading.Tasks.Task.WhenAny(stampede.Task, cancelStub.Task).ConfigureAwait(false);
+ if (ReferenceEquals(first, cancelStub.Task))
+ {
+ // we expect this to throw, because otherwise we wouldn't have gotten here
+ token.ThrowIfCancellationRequested(); // get an appropriate exception
+ }
+ Debug.Assert(ReferenceEquals(first, stampede.Task));
+
+ // this has already completed, but we'll get the stack nicely
+ return (await stampede.Task.ConfigureAwait(false)).GetValue();
+ }
+ finally
+ {
+ stampede.RemoveCaller();
+ }
+ }
+ }
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.cs b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.cs
index 0ec73b682118..947648051cf4 100644
--- a/src/Caching/Hybrid/src/Internal/DefaultHybridCache.cs
+++ b/src/Caching/Hybrid/src/Internal/DefaultHybridCache.cs
@@ -4,10 +4,14 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
@@ -15,48 +19,146 @@ namespace Microsoft.Extensions.Caching.Hybrid.Internal;
///
/// The inbuilt ASP.NET implementation of .
///
-internal sealed class DefaultHybridCache : HybridCache
+internal sealed partial class DefaultHybridCache : HybridCache
{
- private readonly IDistributedCache _backendCache;
- private readonly IServiceProvider _services;
+ private readonly IDistributedCache? _backendCache;
+ private readonly IMemoryCache _localCache;
+ private readonly IServiceProvider _services; // we can't resolve per-type serializers until we see each T
+ private readonly IHybridCacheSerializerFactory[] _serializerFactories;
private readonly HybridCacheOptions _options;
+ private readonly ILogger _logger;
+ private readonly CacheFeatures _features; // used to avoid constant type-testing
- public DefaultHybridCache(IOptions options, IDistributedCache backendCache, IServiceProvider services)
+ private readonly HybridCacheEntryFlags _hardFlags; // *always* present (for example, because no L2)
+ private readonly HybridCacheEntryFlags _defaultFlags; // note this already includes hardFlags
+ private readonly TimeSpan _defaultExpiration;
+ private readonly TimeSpan _defaultLocalCacheExpiration;
+
+ private readonly DistributedCacheEntryOptions _defaultDistributedCacheExpiration;
+
+ [Flags]
+ internal enum CacheFeatures
+ {
+ None = 0,
+ BackendCache = 1 << 0,
+ BackendBuffers = 1 << 1,
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private CacheFeatures GetFeatures(CacheFeatures mask) => _features & mask;
+
+ internal CacheFeatures GetFeatures() => _features;
+
+ // used to restrict features in test suite
+ internal void DebugRemoveFeatures(CacheFeatures features) => Unsafe.AsRef(in _features) &= ~features;
+
+ public DefaultHybridCache(IOptions<HybridCacheOptions> options, IServiceProvider services)
{
- _backendCache = backendCache ?? throw new ArgumentNullException(nameof(backendCache));
_services = services ?? throw new ArgumentNullException(nameof(services));
+ _localCache = services.GetRequiredService<IMemoryCache>();
_options = options.Value;
+ _logger = services.GetService<ILoggerFactory>()?.CreateLogger(typeof(HybridCache)) ?? NullLogger.Instance;
+
+ _backendCache = services.GetService<IDistributedCache>(); // note optional
+
+ // ignore L2 if it is really just the same L1, wrapped
+ // (note not just an "is" test; if someone has a custom subclass, who knows what it does?)
+ if (_backendCache is not null
+ && _backendCache.GetType() == typeof(MemoryDistributedCache)
+ && _localCache.GetType() == typeof(MemoryCache))
+ {
+ _backendCache = null;
+ }
+
+ // perform type-tests on the backend once only
+ _features |= _backendCache switch
+ {
+ IBufferDistributedCache => CacheFeatures.BackendCache | CacheFeatures.BackendBuffers,
+ not null => CacheFeatures.BackendCache,
+ _ => CacheFeatures.None
+ };
+
+ // When resolving serializers via the factory API, we will want the *last* instance,
+ // i.e. "last added wins"; we can optimize by reversing the array ahead of time, and
+ // taking the first match
+ var factories = services.GetServices<IHybridCacheSerializerFactory>().ToArray();
+ Array.Reverse(factories);
+ _serializerFactories = factories;
+
+ MaximumPayloadBytes = checked((int)_options.MaximumPayloadBytes); // for now hard-limit to 2GiB
+
+ var defaultEntryOptions = _options.DefaultEntryOptions;
+
+ if (_backendCache is null)
+ {
+ _hardFlags |= HybridCacheEntryFlags.DisableDistributedCache;
+ }
+ _defaultFlags = (defaultEntryOptions?.Flags ?? HybridCacheEntryFlags.None) | _hardFlags;
+ _defaultExpiration = defaultEntryOptions?.Expiration ?? TimeSpan.FromMinutes(5);
+ _defaultLocalCacheExpiration = defaultEntryOptions?.LocalCacheExpiration ?? TimeSpan.FromMinutes(1);
+ _defaultDistributedCacheExpiration = new DistributedCacheEntryOptions { AbsoluteExpirationRelativeToNow = _defaultExpiration };
}
+ internal IDistributedCache? BackendCache => _backendCache;
+ internal IMemoryCache LocalCache => _localCache;
+
internal HybridCacheOptions Options => _options;
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private HybridCacheEntryFlags GetEffectiveFlags(HybridCacheEntryOptions? options)
+ => (options?.Flags | _hardFlags) ?? _defaultFlags;
+
public override ValueTask<T> GetOrCreateAsync<TState, T>(string key, TState state, Func<TState, CancellationToken, ValueTask<T>> underlyingDataCallback, HybridCacheEntryOptions? options = null, IReadOnlyCollection<string>? tags = null, CancellationToken token = default)
- => underlyingDataCallback(state, token); // pass-thru without caching for initial API pass
+ {
+ var canBeCanceled = token.CanBeCanceled;
+ if (canBeCanceled)
+ {
+ token.ThrowIfCancellationRequested();
+ }
+
+ var flags = GetEffectiveFlags(options);
+ if ((flags & HybridCacheEntryFlags.DisableLocalCacheRead) == 0 && _localCache.TryGetValue(key, out var untyped)
+ && untyped is CacheItem<T> typed && typed.TryGetValue(out var value))
+ {
+ // short-circuit
+ return new(value);
+ }
+
+ if (GetOrCreateStampedeState<TState, T>(key, flags, out var stampede, canBeCanceled))
+ {
+ // new query; we're responsible for making it happen
+ if (canBeCanceled)
+ {
+ // *we* might cancel, but someone else might be depending on the result; start the
+ // work independently, then we'll join the outcome
+ stampede.QueueUserWorkItem(in state, underlyingDataCallback, options);
+ }
+ else
+ {
+ // we're going to run to completion; no need to get complicated
+ _ = stampede.ExecuteDirectAsync(in state, underlyingDataCallback, options); // this larger task includes L2 write etc
+ return stampede.UnwrapAsync();
+ }
+ }
+
+ return stampede.JoinAsync(token);
+ }
public override ValueTask RemoveKeyAsync(string key, CancellationToken token = default)
- => default; // no cache, nothing to remove
+ {
+ _localCache.Remove(key);
+ return _backendCache is null ? default : new(_backendCache.RemoveAsync(key, token));
+ }
public override ValueTask RemoveTagAsync(string tag, CancellationToken token = default)
- => default; // no cache, nothing to remove
+ => default; // tags not yet implemented
public override ValueTask SetAsync<T>(string key, T value, HybridCacheEntryOptions? options = null, IReadOnlyCollection<string>? tags = null, CancellationToken token = default)
- => default; // no cache, nothing to set
-
- internal IHybridCacheSerializer<T> GetSerializer<T>()
{
- // unused API, primarily intended to show configuration is working;
- // the real version would memoize the result
- var service = _services.GetService<IHybridCacheSerializer<T>>();
- if (service is null)
- {
- foreach (var factory in _services.GetServices<IHybridCacheSerializerFactory>())
- {
- if (factory.TryCreateSerializer(out var current))
- {
- service = current;
- }
- }
- }
- return service ?? throw new InvalidOperationException("No serializer configured for type: " + typeof(T).Name);
+ // since we're forcing a write: disable L1+L2 read; we'll use a direct pass-thru of the value as the callback, to reuse all the code;
+ // note also that stampede token is not shared with anyone else
+ var flags = GetEffectiveFlags(options) | (HybridCacheEntryFlags.DisableLocalCacheRead | HybridCacheEntryFlags.DisableDistributedCacheRead);
+ var state = new StampedeState<T, T>(this, new StampedeKey(key, flags), token);
+ return new(state.ExecuteDirectAsync(value, static (state, _) => new(state), options)); // note this spans L2 write etc
}
}
diff --git a/src/Caching/Hybrid/src/Internal/DefaultJsonSerializerFactory.cs b/src/Caching/Hybrid/src/Internal/DefaultJsonSerializerFactory.cs
index e925a033951f..63ce186e1ec4 100644
--- a/src/Caching/Hybrid/src/Internal/DefaultJsonSerializerFactory.cs
+++ b/src/Caching/Hybrid/src/Internal/DefaultJsonSerializerFactory.cs
@@ -21,17 +21,21 @@ internal sealed class DefaultJsonSerializer : IHybridCacheSerializer
T IHybridCacheSerializer.Deserialize(ReadOnlySequence source)
{
var reader = new Utf8JsonReader(source);
+#pragma warning disable IDE0079 // unnecessary suppression: TFM-dependent
#pragma warning disable IL2026, IL3050 // AOT bits
return JsonSerializer.Deserialize(ref reader)!;
#pragma warning restore IL2026, IL3050
+#pragma warning restore IDE0079
}
void IHybridCacheSerializer.Serialize(T value, IBufferWriter target)
{
using var writer = new Utf8JsonWriter(target);
+#pragma warning disable IDE0079 // unnecessary suppression: TFM-dependent
#pragma warning disable IL2026, IL3050 // AOT bits
JsonSerializer.Serialize(writer, value, JsonSerializerOptions.Default);
#pragma warning restore IL2026, IL3050
+#pragma warning restore IDE0079
}
}
diff --git a/src/Caching/Hybrid/src/Internal/RecyclableArrayBufferWriter.cs b/src/Caching/Hybrid/src/Internal/RecyclableArrayBufferWriter.cs
new file mode 100644
index 000000000000..5ff6d4699db8
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/RecyclableArrayBufferWriter.cs
@@ -0,0 +1,184 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Buffers;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+// this is effectively a cut-down re-implementation of ArrayBufferWriter
+// from https://github.com/dotnet/runtime/blob/6cd9bf1937c3b4d2f7304a6c534aacde58a202b6/src/libraries/Common/src/System/Buffers/ArrayBufferWriter.cs
+// except it uses the array pool for allocations
+internal sealed class RecyclableArrayBufferWriter<T> : IBufferWriter<T>, IDisposable
+{
+
+ // Copy of Array.MaxLength.
+ // Used by projects targeting .NET Framework.
+ private const int ArrayMaxLength = 0x7FFFFFC7;
+
+ private const int DefaultInitialBufferSize = 256;
+
+ private T[] _buffer;
+ private int _index;
+ private int _maxLength;
+
+ public int CommittedBytes => _index;
+ public int FreeCapacity => _buffer.Length - _index;
+
+ private static RecyclableArrayBufferWriter<T>? _spare;
+ public static RecyclableArrayBufferWriter<T> Create(int maxLength)
+ {
+ var obj = Interlocked.Exchange(ref _spare, null) ?? new();
+ Debug.Assert(obj._index == 0);
+ obj._maxLength = maxLength;
+ return obj;
+ }
+
+ private RecyclableArrayBufferWriter()
+ {
+ _buffer = [];
+ _index = 0;
+ _maxLength = int.MaxValue;
+ }
+
+ public void Dispose()
+ {
+ // attempt to reuse everything via "spare"; if that isn't possible,
+ // recycle the buffers instead
+ _index = 0;
+ if (Interlocked.CompareExchange(ref _spare, this, null) != null)
+ {
+ var tmp = _buffer;
+ _buffer = [];
+ if (tmp.Length != 0)
+ {
+ ArrayPool<T>.Shared.Return(tmp);
+ }
+ }
+ }
+
+ public void Advance(int count)
+ {
+ if (count < 0)
+ {
+ throw new ArgumentException(null, nameof(count));
+ }
+
+ if (_index > _buffer.Length - count)
+ {
+ ThrowCount();
+ }
+
+ if (_index + count > _maxLength)
+ {
+ ThrowQuota();
+ }
+
+ _index += count;
+
+ static void ThrowCount()
+ => throw new ArgumentOutOfRangeException(nameof(count));
+
+ static void ThrowQuota()
+ => throw new InvalidOperationException("Max length exceeded");
+ }
+
+ /// <summary>
+ /// Disconnect the current buffer so that we can store it without it being recycled
+ /// </summary>
+ internal T[] DetachCommitted(out int length)
+ {
+ var tmp = _index == 0 ? [] : _buffer;
+ length = _index;
+
+ _buffer = [];
+ _index = 0;
+
+ return tmp;
+ }
+
+ public void ResetInPlace()
+ {
+ // resets the writer *without* resetting the buffer;
+ // the existing memory should be considered "gone"
+ // (to claim the buffer instead, use DetachCommitted)
+ _index = 0;
+ }
+
+ internal T[] GetBuffer(out int length)
+ {
+ length = _index;
+ return _index == 0 ? [] : _buffer;
+ }
+
+ public ReadOnlyMemory GetCommittedMemory() => new(_buffer, 0, _index); // could also directly expose a ReadOnlySpan if useful
+
+ public Memory GetMemory(int sizeHint = 0)
+ {
+ CheckAndResizeBuffer(sizeHint);
+ Debug.Assert(_buffer.Length > _index);
+ return _buffer.AsMemory(_index);
+ }
+
+ public Span GetSpan(int sizeHint = 0)
+ {
+ CheckAndResizeBuffer(sizeHint);
+ Debug.Assert(_buffer.Length > _index);
+ return _buffer.AsSpan(_index);
+ }
+
+ // create a standalone isolated copy of the buffer
+ public T[] ToArray() => _buffer.AsSpan(0, _index).ToArray();
+
+ private void CheckAndResizeBuffer(int sizeHint)
+ {
+ if (sizeHint <= 0)
+ {
+ sizeHint = 1;
+ }
+
+ if (sizeHint > FreeCapacity)
+ {
+ var currentLength = _buffer.Length;
+
+ // Attempt to grow by the larger of the sizeHint and double the current size.
+ var growBy = Math.Max(sizeHint, currentLength);
+
+ if (currentLength == 0)
+ {
+ growBy = Math.Max(growBy, DefaultInitialBufferSize);
+ }
+
+ var newSize = currentLength + growBy;
+
+ if ((uint)newSize > int.MaxValue)
+ {
+ // Attempt to grow to ArrayMaxLength.
+ var needed = (uint)(currentLength - FreeCapacity + sizeHint);
+ Debug.Assert(needed > currentLength);
+
+ if (needed > ArrayMaxLength)
+ {
+ ThrowOutOfMemoryException();
+ }
+
+ newSize = ArrayMaxLength;
+ }
+
+ // resize the backing buffer
+ var oldArray = _buffer;
+ _buffer = ArrayPool<T>.Shared.Rent(newSize);
+ oldArray.AsSpan(0, _index).CopyTo(_buffer);
+ if (oldArray.Length != 0)
+ {
+ ArrayPool<T>.Shared.Return(oldArray);
+ }
+ }
+
+ Debug.Assert(FreeCapacity > 0 && FreeCapacity >= sizeHint);
+
+ static void ThrowOutOfMemoryException() => throw new InvalidOperationException("Unable to grow buffer as requested");
+ }
+}
diff --git a/src/Caching/Hybrid/src/Internal/readme.md b/src/Caching/Hybrid/src/Internal/readme.md
new file mode 100644
index 000000000000..8d6a7d878481
--- /dev/null
+++ b/src/Caching/Hybrid/src/Internal/readme.md
@@ -0,0 +1,27 @@
+# HybridCache internal design
+
+`HybridCache` encapsulates serialization, caching and stampede protection.
+
+The `DefaultHybridCache` implementation keeps a collection of `StampedeState` entries
+that represent the current in-flight operations (keyed by `StampedeKey`); if a duplicate
+operation occurs during the execution, the second operation will be joined with that
+same flow, rather than executing independently. When attempting to merge with an
+existing flow, interlocked counting is used: we can only join if we can successfully
+increment the value from a non-zero value (zero meaning all existing consumers have
+canceled, and the shared token is therefore canceled)
+
+The `StampedeState<>` performs back-end fetch operations, resulting not in a `T` (of the final
+value), but instead a `CacheItem`; this is the object that gets put into L1 cache,
+and can describe both mutable and immutable types; the significance here is that for
+mutable types, we need a defensive copy per-call to prevent callers impacting each other.
+
+`StampedeState<>` combines cancellation (so that operations proceed as long as *a* caller
+is still active); this covers all L2 access and serialization operations, releasing all pending
+shared callers for the same operation. Note that L2 storage can occur *after* callers
+have been released.
+
+To ensure correct buffer recycling, when dealing with cache entries that need defensive copies
+we use more ref-counting while reading the buffer, combined with an eviction callback which
+decrements that counter. This means that we recycle things when evicted, without impacting
+in-progress deserialize operations. To simplify tracking, `BufferChunk` acts like a `byte[]`+`int`
+(we don't need non-zero offset), but also tracking "should this be returned to the pool?".
diff --git a/src/Caching/Hybrid/src/Microsoft.Extensions.Caching.Hybrid.csproj b/src/Caching/Hybrid/src/Microsoft.Extensions.Caching.Hybrid.csproj
index 49671f048347..041d425c1d69 100644
--- a/src/Caching/Hybrid/src/Microsoft.Extensions.Caching.Hybrid.csproj
+++ b/src/Caching/Hybrid/src/Microsoft.Extensions.Caching.Hybrid.csproj
@@ -2,7 +2,14 @@
Multi-level caching implementation building on and extending IDistributedCache
- $(DefaultNetCoreTargetFramework);$(DefaultNetFxTargetFramework);netstandard2.0
+
+ $(DefaultNetCoreTargetFramework);$(DefaultNetFxTargetFramework);netstandard2.0;netstandard2.1
true
cache;distributedcache;hybrid
true
@@ -12,18 +19,20 @@
-
+
-
+
+
+
-
+
diff --git a/src/Caching/Hybrid/src/Runtime/HybridCache.cs b/src/Caching/Hybrid/src/Runtime/HybridCache.cs
index a2aaad2c0f26..d2ba7f809cd4 100644
--- a/src/Caching/Hybrid/src/Runtime/HybridCache.cs
+++ b/src/Caching/Hybrid/src/Runtime/HybridCache.cs
@@ -94,7 +94,7 @@ static async ValueTask ForEachAsync(HybridCache @this, IEnumerable keys,
}
///
- /// Asynchronously removes the value associated with the specified tags.
+ /// Asynchronously removes all values associated with the specified tags.
///
/// Implementors should treat null as empty
public virtual ValueTask RemoveTagsAsync(IEnumerable tags, CancellationToken token = default)
@@ -118,7 +118,7 @@ static async ValueTask ForEachAsync(HybridCache @this, IEnumerable keys,
}
///
- /// Asynchronously removes the value associated with the specified tag.
+ /// Asynchronously removes all values associated with the specified tag.
///
public abstract ValueTask RemoveTagAsync(string tag, CancellationToken token = default);
}
diff --git a/src/Caching/Hybrid/src/Runtime/HybridCacheEntryOptions.cs b/src/Caching/Hybrid/src/Runtime/HybridCacheEntryOptions.cs
index a5416cce9692..e5fd18b00699 100644
--- a/src/Caching/Hybrid/src/Runtime/HybridCacheEntryOptions.cs
+++ b/src/Caching/Hybrid/src/Runtime/HybridCacheEntryOptions.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using Microsoft.Extensions.Caching.Distributed;
namespace Microsoft.Extensions.Caching.Hybrid;
@@ -29,4 +30,9 @@ public sealed class HybridCacheEntryOptions
/// Additional flags that apply to this usage.
///
public HybridCacheEntryFlags? Flags { get; init; }
+
+ // memoize when possible
+ private DistributedCacheEntryOptions? _dc;
+ internal DistributedCacheEntryOptions? ToDistributedCacheEntryOptions()
+ => Expiration is null ? null : (_dc ??= new() { AbsoluteExpirationRelativeToNow = Expiration });
}
diff --git a/src/Caching/Hybrid/test/BufferReleaseTests.cs b/src/Caching/Hybrid/test/BufferReleaseTests.cs
new file mode 100644
index 000000000000..f83ce760c3df
--- /dev/null
+++ b/src/Caching/Hybrid/test/BufferReleaseTests.cs
@@ -0,0 +1,221 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Options;
+using static Microsoft.Extensions.Caching.Hybrid.Internal.DefaultHybridCache;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public class BufferReleaseTests // note that buffer ref-counting is only enabled for DEBUG builds; can only verify general behaviour without that
+{
+ static ServiceProvider GetDefaultCache(out DefaultHybridCache cache, Action? config = null)
+ {
+ var services = new ServiceCollection();
+ config?.Invoke(services);
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ [Fact]
+ public async Task BufferGetsReleased_NoL2()
+ {
+ using var provider = GetDefaultCache(out var cache);
+#if DEBUG
+ cache.DebugGetOutstandingBuffers(flush: true);
+#endif
+
+ var key = Me();
+#if DEBUG
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ var first = await cache.GetOrCreateAsync(key, _ => GetAsync());
+ Assert.NotNull(first);
+#if DEBUG
+ Assert.Equal(1, cache.DebugGetOutstandingBuffers());
+#endif
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+
+ // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
+ Assert.True(cacheItem.NeedsEvictionCallback, "should be pooled memory");
+ Assert.True(cacheItem.TryReserveBuffer(out _));
+ cacheItem.Release(); // for the above reserve
+
+ var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.NotNull(second);
+ Assert.NotSame(first, second);
+
+ await cache.RemoveKeyAsync(key);
+ var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.Null(third);
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
+ {
+ await Task.Delay(250);
+ }
+#if DEBUG
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ // assert that we can *no longer* reserve this buffer, because we've already recycled it
+ Assert.False(cacheItem.TryReserveBuffer(out _));
+ Assert.False(cacheItem.NeedsEvictionCallback, "should be recycled now");
+ static ValueTask GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
+ }
+
+ private static readonly HybridCacheEntryOptions _noUnderlying = new() { Flags = HybridCacheEntryFlags.DisableUnderlyingData };
+
+ class TestCache : MemoryDistributedCache, IBufferDistributedCache
+ {
+ public TestCache(IOptions options) : base(options) { }
+
+ void IBufferDistributedCache.Set(string key, ReadOnlySequence value, DistributedCacheEntryOptions options)
+ => Set(key, value.ToArray(), options); // efficiency not important for this
+
+ ValueTask IBufferDistributedCache.SetAsync(string key, ReadOnlySequence value, DistributedCacheEntryOptions options, CancellationToken token)
+ => new(SetAsync(key, value.ToArray(), options, token)); // efficiency not important for this
+
+ bool IBufferDistributedCache.TryGet(string key, IBufferWriter destination)
+ => Write(destination, Get(key));
+
+ async ValueTask IBufferDistributedCache.TryGetAsync(string key, IBufferWriter destination, CancellationToken token)
+ => Write(destination, await GetAsync(key, token));
+
+ static bool Write(IBufferWriter destination, byte[]? buffer)
+ {
+ if (buffer is null)
+ {
+ return false;
+ }
+ destination.Write(buffer);
+ return true;
+ }
+ }
+
+ [Fact]
+ public async Task BufferDoesNotNeedRelease_LegacyL2() // byte[] API; not pooled
+ {
+ using var provider = GetDefaultCache(out var cache,
+ services => services.AddSingleton());
+
+ cache.DebugRemoveFeatures(CacheFeatures.BackendBuffers);
+ // prep the backend with our data
+ var key = Me();
+ Assert.NotNull(cache.BackendCache);
+ var serializer = cache.GetSerializer();
+ using (var writer = RecyclableArrayBufferWriter.Create(int.MaxValue))
+ {
+ serializer.Serialize(await GetAsync(), writer);
+ cache.BackendCache.Set(key, writer.ToArray());
+ }
+#if DEBUG
+ cache.DebugGetOutstandingBuffers(flush: true);
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ var first = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying); // we expect this to come from L2, hence NoUnderlying
+ Assert.NotNull(first);
+#if DEBUG
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+
+ // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
+ Assert.False(cacheItem.NeedsEvictionCallback, "should NOT be pooled memory");
+ Assert.True(cacheItem.TryReserveBuffer(out _));
+ cacheItem.Release(); // for the above reserve
+
+ var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.NotNull(second);
+ Assert.NotSame(first, second);
+
+ await cache.RemoveKeyAsync(key);
+ var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.Null(third);
+ Assert.Null(await cache.BackendCache.GetAsync(key)); // should be gone from L2 too
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
+ {
+ await Task.Delay(250);
+ }
+#if DEBUG
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ // assert that we can *still* reserve this buffer: the byte[] payload is not pooled, so it is always readable
+ Assert.True(cacheItem.TryReserveBuffer(out _)); // always readable
+ cacheItem.Release();
+
+ Assert.False(cacheItem.NeedsEvictionCallback, "should still not need recycling");
+ static ValueTask GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
+ }
+
+ [Fact]
+ public async Task BufferGetsReleased_BufferL2() // IBufferWriter API; pooled
+ {
+ using var provider = GetDefaultCache(out var cache,
+ services => services.AddSingleton());
+
+ // prep the backend with our data
+ var key = Me();
+ Assert.NotNull(cache.BackendCache);
+ var serializer = cache.GetSerializer();
+ using (var writer = RecyclableArrayBufferWriter.Create(int.MaxValue))
+ {
+ serializer.Serialize(await GetAsync(), writer);
+ cache.BackendCache.Set(key, writer.ToArray());
+ }
+#if DEBUG
+ cache.DebugGetOutstandingBuffers(flush: true);
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ var first = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying); // we expect this to come from L2, hence NoUnderlying
+ Assert.NotNull(first);
+#if DEBUG
+ Assert.Equal(1, cache.DebugGetOutstandingBuffers());
+#endif
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+
+ // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
+ Assert.True(cacheItem.NeedsEvictionCallback, "should be pooled memory");
+ Assert.True(cacheItem.TryReserveBuffer(out _));
+ cacheItem.Release(); // for the above reserve
+
+ var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.NotNull(second);
+ Assert.NotSame(first, second);
+
+ await cache.RemoveKeyAsync(key);
+ var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.Null(third);
+ Assert.Null(await cache.BackendCache.GetAsync(key)); // should be gone from L2 too
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
+ {
+ await Task.Delay(250);
+ }
+#if DEBUG
+ Assert.Equal(0, cache.DebugGetOutstandingBuffers());
+#endif
+ // assert that we can *no longer* reserve this buffer, because we've already recycled it
+ Assert.False(cacheItem.TryReserveBuffer(out _)); // released now
+
+ Assert.False(cacheItem.NeedsEvictionCallback, "should be recycled by now");
+ static ValueTask GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
+ }
+
+ public class Customer
+ {
+ public int Id { get; set; }
+ public string Name { get; set; } = "";
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/src/Caching/Hybrid/test/DisposableValueTests.cs b/src/Caching/Hybrid/test/DisposableValueTests.cs
new file mode 100644
index 000000000000..57b2e950485b
--- /dev/null
+++ b/src/Caching/Hybrid/test/DisposableValueTests.cs
@@ -0,0 +1,188 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Text;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class DisposableValueTests
+{
+ // We can only reasonably be expected to be responsible for disposal (IDisposable) of objects
+ // if we're keeping hold of references, which means: things we consider immutable.
+ // It is noted that this creates an oddity whereby for *mutable* types, the caller needs to dispose
+ // per fetch (GetOrCreateAsync), and for *immutable* types: they're not - but that is unavoidable.
+ // In reality, it is expected to be pretty rare to hold disposable types here.
+
+ private static ServiceProvider GetCache(out DefaultHybridCache cache)
+ {
+ var services = new ServiceCollection();
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ [Fact]
+ public async void NonDisposableImmutableTypeDoesNotNeedEvictionCallback()
+ {
+ using var provider = GetCache(out var cache);
+ var key = Me();
+
+ var s = await cache.GetOrCreateAsync(key, _ => GetSomeString());
+ Assert.NotNull(s);
+ Assert.False(string.IsNullOrWhiteSpace(s));
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+ Assert.True(cacheItem.DebugIsImmutable);
+ Assert.False(cacheItem.NeedsEvictionCallback);
+
+ static ValueTask GetSomeString() => new(Guid.NewGuid().ToString());
+ }
+
+ [Fact]
+ public async void NonDisposableBlittableTypeDoesNotNeedEvictionCallback()
+ {
+ using var provider = GetCache(out var cache);
+ var key = Me();
+
+ var g = await cache.GetOrCreateAsync(key, _ => GetSomeGuid());
+ Assert.NotEqual(Guid.Empty, g);
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+ Assert.True(cacheItem.DebugIsImmutable);
+ Assert.False(cacheItem.NeedsEvictionCallback);
+
+ static ValueTask GetSomeGuid() => new(Guid.NewGuid());
+ }
+
+ [Fact]
+ public async Task DispsableRefTypeNeedsEvictionCallback()
+ {
+ using var provider = GetCache(out var cache);
+ var key = Me();
+
+ var inst = new DisposableTestClass();
+ var obj = await cache.GetOrCreateAsync(key, _ => new ValueTask(inst));
+ Assert.Same(inst, obj);
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+ Assert.True(cacheItem.DebugIsImmutable);
+ Assert.True(cacheItem.NeedsEvictionCallback);
+
+ Assert.Equal(0, inst.DisposeCount);
+
+ // now remove it
+ await cache.RemoveKeyAsync(key);
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10; i++)
+ {
+ await Task.Delay(250);
+ if (inst.DisposeCount != 0)
+ {
+ break;
+ }
+ }
+ Assert.Equal(1, inst.DisposeCount);
+ }
+
+ [Fact]
+ public async Task DisposableValueTypeNeedsEvictionCallback()
+ {
+ using var provider = GetCache(out var cache);
+ var key = Me();
+
+ // disposal of value-type
+ var inst = new DisposableTestClass();
+ var v = await cache.GetOrCreateAsync(key, _ => new ValueTask(new DisposableTestValue(inst)));
+ Assert.Same(inst, v.Wrapped);
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+ Assert.True(cacheItem.DebugIsImmutable);
+ Assert.True(cacheItem.NeedsEvictionCallback);
+
+ Assert.Equal(0, inst.DisposeCount);
+
+ // now remove it
+ await cache.RemoveKeyAsync(key);
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10; i++)
+ {
+ await Task.Delay(250);
+ if (inst.DisposeCount != 0)
+ {
+ break;
+ }
+ }
+ Assert.Equal(1, inst.DisposeCount);
+ }
+
+ [Fact]
+ public async Task NonDispsableRefTypeDoesNotNeedEvictionCallback()
+ {
+ using var provider = GetCache(out var cache);
+ var key = Me();
+
+ var inst = new NonDisposableTestClass();
+ var obj = await cache.GetOrCreateAsync(key, _ => new ValueTask(inst));
+ Assert.Same(inst, obj);
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+ Assert.True(cacheItem.DebugIsImmutable);
+ Assert.False(cacheItem.NeedsEvictionCallback);
+ }
+
+ [Fact]
+ public async Task NonDisposableValueTypeDoesNotNeedEvictionCallback()
+ {
+ using var provider = GetCache(out var cache);
+ var key = Me();
+
+ // non-disposable value-type wrapper: no disposal, so no eviction callback expected
+ var inst = new DisposableTestClass();
+ var v = await cache.GetOrCreateAsync(key, _ => new ValueTask(new NonDisposableTestValue(inst)));
+ Assert.Same(inst, v.Wrapped);
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+ Assert.True(cacheItem.DebugIsImmutable);
+ Assert.False(cacheItem.NeedsEvictionCallback);
+ }
+
+ [ImmutableObject(true)]
+ public sealed class DisposableTestClass : IDisposable
+ {
+ private int _disposeCount;
+ public void Dispose() => Interlocked.Increment(ref _disposeCount);
+
+ public int DisposeCount => Volatile.Read(ref _disposeCount);
+ }
+
+ [ImmutableObject(true)]
+ public readonly struct DisposableTestValue : IDisposable
+ {
+ public DisposableTestClass Wrapped { get; }
+ public DisposableTestValue(DisposableTestClass inner) => Wrapped = inner;
+ public void Dispose() => Wrapped.Dispose();
+ }
+
+ [ImmutableObject(true)]
+ public sealed class NonDisposableTestClass
+ {
+ private int _disposeCount;
+ public void Dispose() => Interlocked.Increment(ref _disposeCount);
+
+ public int DisposeCount => Volatile.Read(ref _disposeCount);
+ }
+
+ [ImmutableObject(true)]
+ public readonly struct NonDisposableTestValue
+ {
+ public DisposableTestClass Wrapped { get; }
+ public NonDisposableTestValue(DisposableTestClass inner) => Wrapped = inner;
+ public void Dispose() => Wrapped.Dispose();
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/src/Caching/Hybrid/test/DistributedCacheTests.cs b/src/Caching/Hybrid/test/DistributedCacheTests.cs
new file mode 100644
index 000000000000..bd7a5e58049e
--- /dev/null
+++ b/src/Caching/Hybrid/test/DistributedCacheTests.cs
@@ -0,0 +1,381 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Internal;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+///
+/// Validate over-arching expectations of DC implementations, in particular behaviour re IBufferDistributedCache added for HybridCache
+///
+public abstract class DistributedCacheTests
+{
+ public DistributedCacheTests(ITestOutputHelper log) => Log = log;
+ protected ITestOutputHelper Log { get; }
+ protected abstract ValueTask ConfigureAsync(IServiceCollection services);
+ protected abstract bool CustomClockSupported { get; }
+
+ protected FakeTime Clock { get; } = new();
+
+ protected class FakeTime : TimeProvider, ISystemClock
+ {
+ private DateTimeOffset _now = DateTimeOffset.UtcNow;
+ public void Reset() => _now = DateTimeOffset.UtcNow;
+
+ DateTimeOffset ISystemClock.UtcNow => _now;
+
+ public override DateTimeOffset GetUtcNow() => _now;
+
+ public void Add(TimeSpan delta) => _now += delta;
+ }
+
+ private async ValueTask InitAsync()
+ {
+ Clock.Reset();
+ var services = new ServiceCollection();
+ services.AddSingleton(Clock);
+ services.AddSingleton(Clock);
+ await ConfigureAsync(services);
+ return services;
+ }
+
+ [Theory]
+ [InlineData(0)]
+ [InlineData(128)]
+ [InlineData(1024)]
+ [InlineData(16 * 1024)]
+ public async Task SimpleBufferRoundtrip(int size)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService();
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}";
+ cache.Remove(key);
+ Assert.Null(cache.Get(key));
+
+ var expected = new byte[size];
+ new Random().NextBytes(expected);
+ cache.Set(key, expected, _fiveMinutes);
+
+ var actual = cache.Get(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ actual = cache.Get(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ actual = cache.Get(key);
+ Assert.Null(actual);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+ }
+
+ [Theory]
+ [InlineData(0)]
+ [InlineData(128)]
+ [InlineData(1024)]
+ [InlineData(16 * 1024)]
+ public async Task SimpleBufferRoundtripAsync(int size)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService();
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}";
+ await cache.RemoveAsync(key);
+ Assert.Null(cache.Get(key));
+
+ var expected = new byte[size];
+ new Random().NextBytes(expected);
+ await cache.SetAsync(key, expected, _fiveMinutes);
+
+ var actual = await cache.GetAsync(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ actual = await cache.GetAsync(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ actual = await cache.GetAsync(key);
+ Assert.Null(actual);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+ }
+
+ public enum SequenceKind
+ {
+ FullArray,
+ PaddedArray,
+ CustomMemory,
+ MultiSegment,
+ }
+
+ [Theory]
+ [InlineData(0, SequenceKind.FullArray)]
+ [InlineData(128, SequenceKind.FullArray)]
+ [InlineData(1024, SequenceKind.FullArray)]
+ [InlineData(16 * 1024, SequenceKind.FullArray)]
+ [InlineData(0, SequenceKind.PaddedArray)]
+ [InlineData(128, SequenceKind.PaddedArray)]
+ [InlineData(1024, SequenceKind.PaddedArray)]
+ [InlineData(16 * 1024, SequenceKind.PaddedArray)]
+ [InlineData(0, SequenceKind.CustomMemory)]
+ [InlineData(128, SequenceKind.CustomMemory)]
+ [InlineData(1024, SequenceKind.CustomMemory)]
+ [InlineData(16 * 1024, SequenceKind.CustomMemory)]
+ [InlineData(0, SequenceKind.MultiSegment)]
+ [InlineData(128, SequenceKind.MultiSegment)]
+ [InlineData(1024, SequenceKind.MultiSegment)]
+ [InlineData(16 * 1024, SequenceKind.MultiSegment)]
+ public async Task ReadOnlySequenceBufferRoundtrip(int size, SequenceKind kind)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService() as IBufferDistributedCache;
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available or does not support IBufferDistributedCache");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}/{kind}";
+ cache.Remove(key);
+ Assert.Null(cache.Get(key));
+
+ var payload = Invent(size, kind);
+ ReadOnlyMemory expected = payload.ToArray(); // simplify for testing
+ Assert.Equal(size, expected.Length);
+ cache.Set(key, payload, _fiveMinutes);
+
+ var writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
+ Assert.True(cache.TryGet(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ Assert.True(cache.TryGet(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ Assert.False(cache.TryGet(key, writer));
+ Assert.Equal(0, writer.CommittedBytes);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+ }
+
+ [Theory]
+ [InlineData(0, SequenceKind.FullArray)]
+ [InlineData(128, SequenceKind.FullArray)]
+ [InlineData(1024, SequenceKind.FullArray)]
+ [InlineData(16 * 1024, SequenceKind.FullArray)]
+ [InlineData(0, SequenceKind.PaddedArray)]
+ [InlineData(128, SequenceKind.PaddedArray)]
+ [InlineData(1024, SequenceKind.PaddedArray)]
+ [InlineData(16 * 1024, SequenceKind.PaddedArray)]
+ [InlineData(0, SequenceKind.CustomMemory)]
+ [InlineData(128, SequenceKind.CustomMemory)]
+ [InlineData(1024, SequenceKind.CustomMemory)]
+ [InlineData(16 * 1024, SequenceKind.CustomMemory)]
+ [InlineData(0, SequenceKind.MultiSegment)]
+ [InlineData(128, SequenceKind.MultiSegment)]
+ [InlineData(1024, SequenceKind.MultiSegment)]
+ [InlineData(16 * 1024, SequenceKind.MultiSegment)]
+ public async Task ReadOnlySequenceBufferRoundtripAsync(int size, SequenceKind kind)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService() as IBufferDistributedCache;
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available or does not support IBufferDistributedCache");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}/{kind}";
+ await cache.RemoveAsync(key);
+ Assert.Null(await cache.GetAsync(key));
+
+ var payload = Invent(size, kind);
+ ReadOnlyMemory expected = payload.ToArray(); // simplify for testing
+ Assert.Equal(size, expected.Length);
+ await cache.SetAsync(key, payload, _fiveMinutes);
+
+ var writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
+ Assert.True(await cache.TryGetAsync(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ Assert.True(await cache.TryGetAsync(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ Assert.False(await cache.TryGetAsync(key, writer));
+ Assert.Equal(0, writer.CommittedBytes);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+ }
+
+ static ReadOnlySequence Invent(int size, SequenceKind kind)
+ {
+ var rand = new Random();
+ ReadOnlySequence payload;
+ switch (kind)
+ {
+ case SequenceKind.FullArray:
+ var arr = new byte[size];
+ rand.NextBytes(arr);
+ payload = new(arr);
+ break;
+ case SequenceKind.PaddedArray:
+ arr = new byte[size + 10];
+ rand.NextBytes(arr);
+ payload = new(arr, 5, arr.Length - 10);
+ break;
+ case SequenceKind.CustomMemory:
+ var mem = new CustomMemory(size, rand).Memory;
+ payload = new(mem);
+ break;
+ case SequenceKind.MultiSegment:
+ if (size == 0)
+ {
+ payload = default;
+ break;
+ }
+ if (size < 10)
+ {
+ throw new ArgumentException("small segments not considered"); // a pain to construct
+ }
+ CustomSegment first = new(10, rand, null), // we'll take the last 3 of this 10
+ second = new(size - 7, rand, first), // we'll take all of this one
+ third = new(10, rand, second); // we'll take the first 4 of this 10
+ payload = new(first, 7, third, 4);
+ break;
+ default:
+ throw new ArgumentOutOfRangeException(nameof(kind));
+ }
+
+ // now validate what we expect of that payload
+ Assert.Equal(size, payload.Length);
+ switch (kind)
+ {
+ case SequenceKind.CustomMemory or SequenceKind.MultiSegment when size == 0:
+ Assert.True(payload.IsSingleSegment);
+ Assert.True(MemoryMarshal.TryGetArray(payload.First, out _));
+ break;
+ case SequenceKind.MultiSegment:
+ Assert.False(payload.IsSingleSegment);
+ break;
+ case SequenceKind.CustomMemory:
+ Assert.True(payload.IsSingleSegment);
+ Assert.False(MemoryMarshal.TryGetArray(payload.First, out _));
+ break;
+ case SequenceKind.FullArray:
+ Assert.True(payload.IsSingleSegment);
+ Assert.True(MemoryMarshal.TryGetArray(payload.First, out var segment));
+ Assert.Equal(0, segment.Offset);
+ Assert.NotNull(segment.Array);
+ Assert.Equal(size, segment.Count);
+ Assert.Equal(size, segment.Array.Length);
+ break;
+ case SequenceKind.PaddedArray:
+ Assert.True(payload.IsSingleSegment);
+ Assert.True(MemoryMarshal.TryGetArray(payload.First, out segment));
+ Assert.NotEqual(0, segment.Offset);
+ Assert.NotNull(segment.Array);
+ Assert.Equal(size, segment.Count);
+ Assert.NotEqual(size, segment.Array.Length);
+ break;
+ }
+ return payload;
+ }
+
+ class CustomSegment : ReadOnlySequenceSegment
+ {
+ public CustomSegment(int size, Random? rand, CustomSegment? previous)
+ {
+ var arr = new byte[size + 10];
+ rand?.NextBytes(arr);
+ Memory = new(arr, 5, arr.Length - 10);
+ if (previous is not null)
+ {
+ RunningIndex = previous.RunningIndex + previous.Memory.Length;
+ previous.Next = this;
+ }
+ }
+ }
+
+ class CustomMemory : MemoryManager
+ {
+ private readonly byte[] _data;
+ public CustomMemory(int size, Random? rand = null)
+ {
+ _data = new byte[size + 10];
+ rand?.NextBytes(_data);
+ }
+ public override Span GetSpan() => new(_data, 5, _data.Length - 10);
+ public override MemoryHandle Pin(int elementIndex = 0) => throw new NotSupportedException();
+ public override void Unpin() => throw new NotSupportedException();
+ protected override void Dispose(bool disposing) { }
+ protected override bool TryGetArray(out ArraySegment segment)
+ {
+ segment = default;
+ return false;
+ }
+ }
+
+ private static readonly DistributedCacheEntryOptions _fiveMinutes
+ = new() { AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(5) };
+
+ protected static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/src/Caching/Hybrid/test/L2Tests.cs b/src/Caching/Hybrid/test/L2Tests.cs
new file mode 100644
index 000000000000..7d8389f03b7f
--- /dev/null
+++ b/src/Caching/Hybrid/test/L2Tests.cs
@@ -0,0 +1,259 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Collections;
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.DependencyInjection.Extensions;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Options;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class L2Tests(ITestOutputHelper Log)
+{
+ class Options(T Value) : IOptions where T : class
+ {
+ T IOptions.Value => Value;
+ }
+ ServiceProvider GetDefaultCache(bool buffers, out DefaultHybridCache cache)
+ {
+ var services = new ServiceCollection();
+ var localCacheOptions = new Options(new());
+ var localCache = new MemoryDistributedCache(localCacheOptions);
+ services.AddSingleton(buffers ? new BufferLoggingCache(Log, localCache) : new LoggingCache(Log, localCache));
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ static string CreateString(bool work = false)
+ {
+ Assert.True(work, "we didn't expect this to be invoked");
+ return Guid.NewGuid().ToString();
+ }
+
+ static readonly HybridCacheEntryOptions _noL1 = new() { Flags = HybridCacheEntryFlags.DisableLocalCache };
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task AssertL2Operations_Immutable(bool buffers)
+ {
+ using var provider = GetDefaultCache(buffers, out var cache);
+ var backend = Assert.IsAssignableFrom(cache.BackendCache);
+ Log.WriteLine("Inventing key...");
+ var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString(true)));
+ Assert.Equal(2, backend.OpCount); // GET, SET
+
+ Log.WriteLine("Reading with L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString()));
+ Assert.Equal(s, x);
+ Assert.Same(s, x);
+ }
+ Assert.Equal(2, backend.OpCount); // shouldn't be hit
+
+ Log.WriteLine("Reading without L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString()), _noL1);
+ Assert.Equal(s, x);
+ Assert.NotSame(s, x);
+ }
+ Assert.Equal(7, backend.OpCount); // should be read every time
+
+ Log.WriteLine("Setting value directly");
+ s = CreateString(true);
+ await cache.SetAsync(Me(), s);
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString()));
+ Assert.Equal(s, x);
+ Assert.Same(s, x);
+ }
+ Assert.Equal(8, backend.OpCount); // SET
+
+ Log.WriteLine("Removing key...");
+ await cache.RemoveKeyAsync(Me());
+ Assert.Equal(9, backend.OpCount); // DEL
+
+ Log.WriteLine("Fetching new...");
+ var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString(true)));
+ Assert.NotEqual(s, t);
+ Assert.Equal(11, backend.OpCount); // GET, SET
+ }
+
+ public sealed class Foo
+ {
+ public string Value { get; set; } = "";
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task AssertL2Operations_Mutable(bool buffers)
+ {
+ using var provider = GetDefaultCache(buffers, out var cache);
+ var backend = Assert.IsAssignableFrom(cache.BackendCache);
+ Log.WriteLine("Inventing key...");
+ var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString(true) }));
+ Assert.Equal(2, backend.OpCount); // GET, SET
+
+ Log.WriteLine("Reading with L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString() }));
+ Assert.Equal(s.Value, x.Value);
+ Assert.NotSame(s, x);
+ }
+ Assert.Equal(2, backend.OpCount); // shouldn't be hit
+
+ Log.WriteLine("Reading without L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString() }), _noL1);
+ Assert.Equal(s.Value, x.Value);
+ Assert.NotSame(s, x);
+ }
+ Assert.Equal(7, backend.OpCount); // should be read every time
+
+ Log.WriteLine("Setting value directly");
+ s = new Foo { Value = CreateString(true) };
+ await cache.SetAsync(Me(), s);
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString() }));
+ Assert.Equal(s.Value, x.Value);
+ Assert.NotSame(s, x);
+ }
+ Assert.Equal(8, backend.OpCount); // SET
+
+ Log.WriteLine("Removing key...");
+ await cache.RemoveKeyAsync(Me());
+ Assert.Equal(9, backend.OpCount); // DEL
+
+ Log.WriteLine("Fetching new...");
+ var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString(true) }));
+ Assert.NotEqual(s.Value, t.Value);
+ Assert.Equal(11, backend.OpCount); // GET, SET
+ }
+
+ class BufferLoggingCache : LoggingCache, IBufferDistributedCache
+ {
+ public BufferLoggingCache(ITestOutputHelper log, IDistributedCache tail) : base(log, tail) { }
+
+ void IBufferDistributedCache.Set(string key, ReadOnlySequence value, DistributedCacheEntryOptions options)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"Set (ROS-byte): {key}");
+ Tail.Set(key, value.ToArray(), options);
+ }
+
+ ValueTask IBufferDistributedCache.SetAsync(string key, ReadOnlySequence value, DistributedCacheEntryOptions options, CancellationToken token)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"SetAsync (ROS-byte): {key}");
+ return new(Tail.SetAsync(key, value.ToArray(), options, token));
+ }
+
+ bool IBufferDistributedCache.TryGet(string key, IBufferWriter destination)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"TryGet: {key}");
+ var buffer = Tail.Get(key);
+ if (buffer is null)
+ {
+ return false;
+ }
+ destination.Write(buffer);
+ return true;
+ }
+
+ async ValueTask IBufferDistributedCache.TryGetAsync(string key, IBufferWriter destination, CancellationToken token)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"TryGetAsync: {key}");
+ var buffer = await Tail.GetAsync(key, token);
+ if (buffer is null)
+ {
+ return false;
+ }
+ destination.Write(buffer);
+ return true;
+ }
+ }
+
+ class LoggingCache(ITestOutputHelper log, IDistributedCache tail) : IDistributedCache
+ {
+ protected ITestOutputHelper Log => log;
+ protected IDistributedCache Tail => tail;
+
+ protected int opcount;
+ public int OpCount => Volatile.Read(ref opcount);
+
+ byte[]? IDistributedCache.Get(string key)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"Get: {key}");
+ return Tail.Get(key);
+ }
+
+ Task IDistributedCache.GetAsync(string key, CancellationToken token)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"GetAsync: {key}");
+ return Tail.GetAsync(key, token);
+ }
+
+ void IDistributedCache.Refresh(string key)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"Refresh: {key}");
+ Tail.Refresh(key);
+ }
+
+ Task IDistributedCache.RefreshAsync(string key, CancellationToken token)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"RefreshAsync: {key}");
+ return Tail.RefreshAsync(key, token);
+ }
+
+ void IDistributedCache.Remove(string key)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"Remove: {key}");
+ Tail.Remove(key);
+ }
+
+ Task IDistributedCache.RemoveAsync(string key, CancellationToken token)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"RemoveAsync: {key}");
+ return Tail.RemoveAsync(key, token);
+ }
+
+ void IDistributedCache.Set(string key, byte[] value, DistributedCacheEntryOptions options)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"Set (byte[]): {key}");
+ Tail.Set(key, value, options);
+ }
+
+ Task IDistributedCache.SetAsync(string key, byte[] value, DistributedCacheEntryOptions options, CancellationToken token)
+ {
+ Interlocked.Increment(ref opcount);
+ Log.WriteLine($"SetAsync (byte[]): {key}");
+ return Tail.SetAsync(key, value, options, token);
+ }
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/src/Caching/Hybrid/test/Microsoft.Extensions.Caching.Hybrid.Tests.csproj b/src/Caching/Hybrid/test/Microsoft.Extensions.Caching.Hybrid.Tests.csproj
index c589f1499cc8..54c0de3adf67 100644
--- a/src/Caching/Hybrid/test/Microsoft.Extensions.Caching.Hybrid.Tests.csproj
+++ b/src/Caching/Hybrid/test/Microsoft.Extensions.Caching.Hybrid.Tests.csproj
@@ -9,6 +9,8 @@
+
+
diff --git a/src/Caching/Hybrid/test/RedisTests.cs b/src/Caching/Hybrid/test/RedisTests.cs
new file mode 100644
index 000000000000..32d3ca797a23
--- /dev/null
+++ b/src/Caching/Hybrid/test/RedisTests.cs
@@ -0,0 +1,103 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.StackExchangeRedis;
+using Microsoft.Extensions.DependencyInjection;
+using StackExchange.Redis;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public sealed class RedisFixture : IDisposable
+{
+ private ConnectionMultiplexer? _muxer;
+ private Task<ConnectionMultiplexer?>? _sharedConnect;
+ public Task<ConnectionMultiplexer?> ConnectAsync() => _sharedConnect ??= DoConnectAsync();
+
+ public void Dispose() => _muxer?.Dispose();
+
+ async Task<ConnectionMultiplexer?> DoConnectAsync()
+ {
+ try
+ {
+ _muxer = await ConnectionMultiplexer.ConnectAsync("127.0.0.1:6379");
+ await _muxer.GetDatabase().PingAsync();
+ return _muxer;
+ }
+ catch
+ {
+ return null;
+ }
+ }
+}
+public class RedisTests : DistributedCacheTests, IClassFixture<RedisFixture>
+{
+ private readonly RedisFixture _fixture;
+ public RedisTests(RedisFixture fixture, ITestOutputHelper log) : base(log) => _fixture = fixture;
+
+ protected override bool CustomClockSupported => false;
+
+ protected override async ValueTask ConfigureAsync(IServiceCollection services)
+ {
+ var redis = await _fixture.ConnectAsync();
+ if (redis is null)
+ {
+ Log.WriteLine("Redis is not available");
+ return; // inconclusive
+ }
+ Log.WriteLine("Redis is available");
+ services.AddSingleton<IConnectionMultiplexer>(redis);
+ services.AddStackExchangeRedisCache(options =>
+ {
+ options.ConnectionMultiplexerFactory = () => Task.FromResult<IConnectionMultiplexer>(redis);
+ });
+ }
+
+ [Theory]
+ [InlineData(false)]
+ [InlineData(true)]
+ public async Task BasicUsage(bool useBuffers)
+ {
+ var services = new ServiceCollection();
+ await ConfigureAsync(services);
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider(); // not "using" - that will tear down our redis; use the fixture for that
+
+ var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
+ if (cache.BackendCache is null)
+ {
+ Log.WriteLine("Backend cache not available; inconclusive");
+ return;
+ }
+ Assert.IsAssignableFrom<RedisCache>(cache.BackendCache);
+
+ if (!useBuffers) // force byte[] mode
+ {
+ cache.DebugRemoveFeatures(DefaultHybridCache.CacheFeatures.BackendBuffers);
+ }
+ Log.WriteLine($"features: {cache.GetFeatures()}");
+
+ var key = Me();
+ var redis = provider.GetRequiredService<IConnectionMultiplexer>();
+ await redis.GetDatabase().KeyDeleteAsync(key); // start from known state
+ Assert.False(await redis.GetDatabase().KeyExistsAsync(key));
+
+ var count = 0;
+ for (var i = 0; i < 10; i++)
+ {
+ await cache.GetOrCreateAsync(key, _ =>
+ {
+ Interlocked.Increment(ref count);
+ return new(Guid.NewGuid());
+ });
+ }
+ Assert.Equal(1, count);
+
+ await Task.Delay(500); // the L2 write continues in the background; give it a chance
+
+ var ttl = await redis.GetDatabase().KeyTimeToLiveAsync(key);
+ Log.WriteLine($"ttl: {ttl}");
+ Assert.NotNull(ttl);
+ }
+}
diff --git a/src/Caching/Hybrid/test/ServiceConstructionTests.cs b/src/Caching/Hybrid/test/ServiceConstructionTests.cs
index d9515816f222..50887d990aa0 100644
--- a/src/Caching/Hybrid/test/ServiceConstructionTests.cs
+++ b/src/Caching/Hybrid/test/ServiceConstructionTests.cs
@@ -3,10 +3,14 @@
using System.Buffers;
using System.Runtime.CompilerServices;
+using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Configuration.Json;
using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Options;
#pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously
#pragma warning disable CS8769 // Nullability of reference types in type of parameter doesn't match implemented member (possibly because of nullability attributes).
@@ -136,6 +140,73 @@ public void CustomSerializerFactoryConfiguration()
Assert.IsType>(cache.GetSerializer());
}
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void DefaultMemoryDistributedCacheIsIgnored(bool manual)
+ {
+ var services = new ServiceCollection();
+ if (manual)
+ {
+ services.AddSingleton<IDistributedCache, MemoryDistributedCache>();
+ }
+ else
+ {
+ services.AddDistributedMemoryCache();
+ }
+ services.AddHybridCache();
+ using var provider = services.BuildServiceProvider();
+ var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
+
+ Assert.Null(cache.BackendCache);
+ }
+
+ [Fact]
+ public void SubclassMemoryDistributedCacheIsNotIgnored()
+ {
+ var services = new ServiceCollection();
+ services.AddSingleton<IDistributedCache, CustomMemoryDistributedCache>();
+ services.AddHybridCache();
+ using var provider = services.BuildServiceProvider();
+ var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
+
+ Assert.NotNull(cache.BackendCache);
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void SubclassMemoryCacheIsNotIgnored(bool manual)
+ {
+ var services = new ServiceCollection();
+ if (manual)
+ {
+ services.AddSingleton<IDistributedCache, MemoryDistributedCache>();
+ }
+ else
+ {
+ services.AddDistributedMemoryCache();
+ }
+ services.AddSingleton<IMemoryCache, CustomMemoryCache>();
+ services.AddHybridCache();
+ using var provider = services.BuildServiceProvider();
+ var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
+
+ Assert.NotNull(cache.BackendCache);
+ }
+
+ class CustomMemoryCache : MemoryCache
+ {
+ public CustomMemoryCache(IOptions<MemoryCacheOptions> options) : base(options) { }
+ public CustomMemoryCache(IOptions<MemoryCacheOptions> options, ILoggerFactory loggerFactory) : base(options, loggerFactory) { }
+ }
+
+ class CustomMemoryDistributedCache : MemoryDistributedCache
+ {
+ public CustomMemoryDistributedCache(IOptions<MemoryDistributedCacheOptions> options) : base(options) { }
+ public CustomMemoryDistributedCache(IOptions<MemoryDistributedCacheOptions> options, ILoggerFactory loggerFactory) : base(options, loggerFactory) { }
+ }
+
class Customer { }
class Order { }
@@ -158,5 +229,6 @@ bool IHybridCacheSerializerFactory.TryCreateSerializer(out IHybridCacheSerial
return false;
}
}
+
private static string Me([CallerMemberName] string caller = "") => caller;
}
diff --git a/src/Caching/Hybrid/test/SqlServerTests.cs b/src/Caching/Hybrid/test/SqlServerTests.cs
new file mode 100644
index 000000000000..bbfc18338933
--- /dev/null
+++ b/src/Caching/Hybrid/test/SqlServerTests.cs
@@ -0,0 +1,46 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Data.SqlClient;
+using Microsoft.Extensions.DependencyInjection;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public class SqlServerTests : DistributedCacheTests
+{
+ public SqlServerTests(ITestOutputHelper log) : base(log) { }
+
+ protected override bool CustomClockSupported => true;
+
+ protected override async ValueTask ConfigureAsync(IServiceCollection services)
+ {
+ // create a local DB named CacheBench, then
+ // dotnet tool install --global dotnet-sql-cache
+ // dotnet sql-cache create "Data Source=.;Initial Catalog=CacheBench;Integrated Security=True;Trust Server Certificate=True" dbo BenchmarkCache
+
+ const string ConnectionString = "Data Source=.;Initial Catalog=CacheBench;Integrated Security=True;Trust Server Certificate=True";
+
+ try
+ {
+ using var conn = new SqlConnection(ConnectionString);
+ using var cmd = conn.CreateCommand();
+ cmd.CommandText = "truncate table dbo.BenchmarkCache";
+ await conn.OpenAsync();
+ await cmd.ExecuteNonQueryAsync();
+
+ // if that worked: we should be fine
+ services.AddDistributedSqlServerCache(options =>
+ {
+ options.SchemaName = "dbo";
+ options.TableName = "BenchmarkCache";
+ options.ConnectionString = ConnectionString;
+ options.SystemClock = Clock;
+ });
+ }
+ catch (Exception ex)
+ {
+ Log.WriteLine(ex.Message);
+ }
+ }
+}
diff --git a/src/Caching/Hybrid/test/StampedeTests.cs b/src/Caching/Hybrid/test/StampedeTests.cs
new file mode 100644
index 000000000000..b6536655a5e3
--- /dev/null
+++ b/src/Caching/Hybrid/test/StampedeTests.cs
@@ -0,0 +1,380 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Text;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.DependencyInjection.Extensions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class StampedeTests
+{
+ static ServiceProvider GetDefaultCache(out DefaultHybridCache cache)
+ {
+ var services = new ServiceCollection();
+ services.AddSingleton<IMemoryCache, InvalidCache>();
+ services.AddSingleton<IDistributedCache, InvalidCache>();
+ services.AddHybridCache(options =>
+ {
+ options.DefaultEntryOptions = new()
+ {
+ Flags = HybridCacheEntryFlags.DisableDistributedCache | HybridCacheEntryFlags.DisableLocalCache
+ };
+ });
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
+ return provider;
+ }
+
+ public sealed class InvalidCache : IDistributedCache, IMemoryCache
+ {
+ void IDisposable.Dispose() { }
+ ICacheEntry IMemoryCache.CreateEntry(object key) => throw new NotSupportedException("Intentionally not provided");
+
+ byte[]? IDistributedCache.Get(string key) => throw new NotSupportedException("Intentionally not provided");
+
+ Task<byte[]?> IDistributedCache.GetAsync(string key, CancellationToken token) => throw new NotSupportedException("Intentionally not provided");
+
+ void IDistributedCache.Refresh(string key) => throw new NotSupportedException("Intentionally not provided");
+
+ Task IDistributedCache.RefreshAsync(string key, CancellationToken token) => throw new NotSupportedException("Intentionally not provided");
+
+ void IDistributedCache.Remove(string key) => throw new NotSupportedException("Intentionally not provided");
+
+ void IMemoryCache.Remove(object key) => throw new NotSupportedException("Intentionally not provided");
+
+ Task IDistributedCache.RemoveAsync(string key, CancellationToken token) => throw new NotSupportedException("Intentionally not provided");
+
+ void IDistributedCache.Set(string key, byte[] value, DistributedCacheEntryOptions options) => throw new NotSupportedException("Intentionally not provided");
+
+ Task IDistributedCache.SetAsync(string key, byte[] value, DistributedCacheEntryOptions options, CancellationToken token) => throw new NotSupportedException("Intentionally not provided");
+
+ bool IMemoryCache.TryGetValue(object key, out object? value) => throw new NotSupportedException("Intentionally not provided");
+ }
+
+ [Theory]
+ [InlineData(1, false)]
+ [InlineData(1, true)]
+ [InlineData(10, false)]
+ [InlineData(10, true)]
+ public async Task MultipleCallsShareExecution_NoCancellation(int callerCount, bool canBeCanceled)
+ {
+ using var scope = GetDefaultCache(out var cache);
+ using var semaphore = new SemaphoreSlim(0);
+
+ var token = canBeCanceled ? new CancellationTokenSource().Token : CancellationToken.None;
+
+ int executeCount = 0, cancelCount = 0;
+ var results = new Task<Guid>[callerCount];
+ for (var i = 0; i < callerCount; i++)
+ {
+ results[i] = cache.GetOrCreateAsync(Me(), async ct =>
+ {
+ using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
+ if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
+ {
+ throw new TimeoutException("Failed to activate");
+ }
+ Interlocked.Increment(ref executeCount);
+ ct.ThrowIfCancellationRequested(); // assert not cancelled
+ return Guid.NewGuid();
+ }, token: token).AsTask();
+ }
+
+ Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
+
+ // everyone is queued up; release the hounds and check
+ // that we all got the same result
+ Assert.Equal(0, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount));
+ semaphore.Release();
+ var first = await results[0];
+ Assert.Equal(1, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount));
+ foreach (var result in results)
+ {
+ Assert.Equal(first, await result);
+ }
+ Assert.Equal(1, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount));
+
+ // and do it a second time; we expect different results
+ Volatile.Write(ref executeCount, 0);
+ for (var i = 0; i < callerCount; i++)
+ {
+ results[i] = cache.GetOrCreateAsync(Me(), async ct =>
+ {
+ using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
+ if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
+ {
+ throw new TimeoutException("Failed to activate");
+ }
+ Interlocked.Increment(ref executeCount);
+ ct.ThrowIfCancellationRequested(); // assert not cancelled
+ return Guid.NewGuid();
+ }, token: token).AsTask();
+ }
+
+ Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
+
+ // everyone is queued up; release the hounds and check
+ // that we all got the same result
+ Assert.Equal(0, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount));
+ semaphore.Release();
+ var second = await results[0];
+ Assert.NotEqual(first, second);
+ Assert.Equal(1, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount));
+ foreach (var result in results)
+ {
+ Assert.Equal(second, await result);
+ }
+ Assert.Equal(1, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount));
+ }
+
+ [Theory]
+ [InlineData(1)]
+ [InlineData(10)]
+ public async Task MultipleCallsShareExecution_EveryoneCancels(int callerCount)
+ {
+ // what we want to prove here is that everyone ends up cancelling promptly by
+ // *their own* cancellation (not dependent on the shared task), and that
+ // the shared task becomes cancelled (which can be later)
+
+ using var scope = GetDefaultCache(out var cache);
+ using var semaphore = new SemaphoreSlim(0);
+
+ int executeCount = 0, cancelCount = 0;
+ var results = new Task<Guid>[callerCount];
+ var cancels = new CancellationTokenSource[callerCount];
+ for (var i = 0; i < callerCount; i++)
+ {
+ cancels[i] = new CancellationTokenSource();
+ results[i] = cache.GetOrCreateAsync(Me(), async ct =>
+ {
+ using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
+ if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
+ {
+ throw new TimeoutException("Failed to activate");
+ }
+ try
+ {
+ Interlocked.Increment(ref executeCount);
+ ct.ThrowIfCancellationRequested();
+ return Guid.NewGuid();
+ }
+ finally
+ {
+ semaphore.Release(); // handshake so we can check when available again
+ }
+ }, token: cancels[i].Token).AsTask();
+ }
+
+ Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
+
+ // everyone is queued up; release the hounds and check
+ // that we all got the same result
+ foreach (var cancel in cancels)
+ {
+ cancel.Cancel();
+ }
+ await Task.Delay(500); // cancellation happens on a worker; need to allow a moment
+ for (var i = 0; i < callerCount; i++)
+ {
+ var result = results[i];
+ // should have already cancelled, even though underlying task hasn't finished yet
+ Assert.Equal(TaskStatus.Canceled, result.Status);
+ var ex = Assert.Throws<TaskCanceledException>(() => result.GetAwaiter().GetResult());
+ Assert.Equal(cancels[i].Token, ex.CancellationToken); // each gets the correct blame
+ }
+
+ Assert.Equal(0, Volatile.Read(ref executeCount));
+ semaphore.Release();
+ if (!await semaphore.WaitAsync(5_000)) // wait for underlying task to hand back to us
+ {
+ throw new TimeoutException("Didn't get handshake back from task");
+ }
+ Assert.Equal(1, Volatile.Read(ref executeCount));
+ Assert.Equal(1, Volatile.Read(ref cancelCount));
+ }
+
+ [Theory]
+ [InlineData(2, 0)]
+ [InlineData(2, 1)]
+ [InlineData(10, 0)]
+ [InlineData(10, 1)]
+ [InlineData(10, 7)]
+ public async Task MultipleCallsShareExecution_MostCancel(int callerCount, int remaining)
+ {
+ Assert.True(callerCount >= 2); // "most" is not "one"
+
+ // what we want to prove here is that everyone ends up cancelling promptly by
+ // *their own* cancellation (not dependent on the shared task), and that
+ // the shared task becomes cancelled (which can be later)
+
+ using var scope = GetDefaultCache(out var cache);
+ using var semaphore = new SemaphoreSlim(0);
+
+ int executeCount = 0, cancelCount = 0;
+ var results = new Task<Guid>[callerCount];
+ var cancels = new CancellationTokenSource[callerCount];
+ for (var i = 0; i < callerCount; i++)
+ {
+ cancels[i] = new CancellationTokenSource();
+ results[i] = cache.GetOrCreateAsync(Me(), async ct =>
+ {
+ using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
+ if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
+ {
+ throw new TimeoutException("Failed to activate");
+ }
+ try
+ {
+ Interlocked.Increment(ref executeCount);
+ ct.ThrowIfCancellationRequested();
+ return Guid.NewGuid();
+ }
+ finally
+ {
+ semaphore.Release(); // handshake so we can check when available again
+ }
+ }, token: cancels[i].Token).AsTask();
+ }
+
+ Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
+
+ // everyone is queued up; release the hounds and check
+ // that we all got the same result
+ for (var i = 0; i < callerCount; i++)
+ {
+ if (i != remaining)
+ {
+ cancels[i].Cancel();
+ }
+ }
+ await Task.Delay(500); // cancellation happens on a worker; need to allow a moment
+ for (var i = 0; i < callerCount; i++)
+ {
+ if (i != remaining)
+ {
+ var result = results[i];
+ // should have already cancelled, even though underlying task hasn't finished yet
+ Assert.Equal(TaskStatus.Canceled, result.Status);
+ var ex = Assert.Throws<TaskCanceledException>(() => result.GetAwaiter().GetResult());
+ Assert.Equal(cancels[i].Token, ex.CancellationToken); // each gets the correct blame
+ }
+ }
+
+ Assert.Equal(0, Volatile.Read(ref executeCount));
+ semaphore.Release();
+ if (!await semaphore.WaitAsync(5_000)) // wait for underlying task to hand back to us
+ {
+ throw new TimeoutException("Didn't get handshake back from task");
+ }
+ Assert.Equal(1, Volatile.Read(ref executeCount));
+ Assert.Equal(0, Volatile.Read(ref cancelCount)); // ran to completion
+ await results[remaining];
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task ImmutableTypesShareFinalTask(bool withCancelation)
+ {
+ var token = withCancelation ? new CancellationTokenSource().Token : CancellationToken.None;
+
+ using var scope = GetDefaultCache(out var cache);
+ using var semaphore = new SemaphoreSlim(0);
+
+ // note AsTask *in this scenario* fetches the underlying incomplete task
+ var first = cache.GetOrCreateAsync(Me(), async ct => { await semaphore.WaitAsync(CancellationToken.None); semaphore.Release(); return Guid.NewGuid(); }, token: token).AsTask();
+ var second = cache.GetOrCreateAsync(Me(), async ct => { await semaphore.WaitAsync(CancellationToken.None); semaphore.Release(); return Guid.NewGuid(); }, token: token).AsTask();
+
+ if (withCancelation)
+ {
+ Assert.NotSame(first, second);
+ }
+ else
+ {
+ Assert.Same(first, second);
+ }
+ semaphore.Release();
+ Assert.Equal(await first, await second);
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task ImmutableCustomTypesShareFinalTask(bool withCancelation)
+ {
+ var token = withCancelation ? new CancellationTokenSource().Token : CancellationToken.None;
+
+ using var scope = GetDefaultCache(out var cache);
+ using var semaphore = new SemaphoreSlim(0);
+
+ // AsTask *in this scenario* fetches the underlying incomplete task
+ var first = cache.GetOrCreateAsync(Me(), async ct => { await semaphore.WaitAsync(CancellationToken.None); semaphore.Release(); return new Immutable(Guid.NewGuid()); }, token: token).AsTask();
+ var second = cache.GetOrCreateAsync(Me(), async ct => { await semaphore.WaitAsync(CancellationToken.None); semaphore.Release(); return new Immutable(Guid.NewGuid()); }, token: token).AsTask();
+
+ if (withCancelation)
+ {
+ Assert.NotSame(first, second);
+ }
+ else
+ {
+ Assert.Same(first, second);
+ }
+ semaphore.Release();
+
+ var x = await first;
+ var y = await second;
+ Assert.Equal(x.Value, y.Value);
+ Assert.Same(x, y); // same instance regardless of whether the tasks were shared
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task MutableTypesNeverShareFinalTask(bool withCancelation)
+ {
+ var token = withCancelation ? new CancellationTokenSource().Token : CancellationToken.None;
+
+ using var scope = GetDefaultCache(out var cache);
+ using var semaphore = new SemaphoreSlim(0);
+
+ // AsTask *in this scenario* fetches the underlying incomplete task
+ var first = cache.GetOrCreateAsync(Me(), async ct => { await semaphore.WaitAsync(CancellationToken.None); semaphore.Release(); return new Mutable(Guid.NewGuid()); }, token: token).AsTask();
+ var second = cache.GetOrCreateAsync(Me(), async ct => { await semaphore.WaitAsync(CancellationToken.None); semaphore.Release(); return new Mutable(Guid.NewGuid()); }, token: token).AsTask();
+
+ Assert.NotSame(first, second);
+ semaphore.Release();
+
+ var x = await first;
+ var y = await second;
+ Assert.Equal(x.Value, y.Value);
+ Assert.NotSame(x, y);
+ }
+
+ class Mutable(Guid value)
+ {
+ public Guid Value => value;
+ }
+
+ [ImmutableObject(true)]
+ public sealed class Immutable(Guid value)
+ {
+ public Guid Value => value;
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/src/Caching/Hybrid/test/TypeTests.cs b/src/Caching/Hybrid/test/TypeTests.cs
new file mode 100644
index 000000000000..dc1f6f06749b
--- /dev/null
+++ b/src/Caching/Hybrid/test/TypeTests.cs
@@ -0,0 +1,62 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Linq;
+using System.Reflection;
+using System.Text;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class TypeTests
+{
+ [Theory]
+ [InlineData(typeof(string))]
+ [InlineData(typeof(int))] // primitive
+ [InlineData(typeof(int?))]
+ [InlineData(typeof(Guid))] // non-primitive but blittable
+ [InlineData(typeof(Guid?))]
+ [InlineData(typeof(SealedCustomClassAttribTrue))] // attrib says explicitly true, and sealed
+ [InlineData(typeof(CustomBlittableStruct))] // blittable, and we're copying each time
+ [InlineData(typeof(CustomNonBlittableStructAttribTrue))] // non-blittable, attrib says explicitly true
+ public void ImmutableTypes(Type type)
+ {
+ Assert.True((bool)typeof(DefaultHybridCache.ImmutableTypeCache<>).MakeGenericType(type)
+ .GetField(nameof(DefaultHybridCache.ImmutableTypeCache<string>.IsImmutable), BindingFlags.Static | BindingFlags.Public)!
+ .GetValue(null)!);
+ }
+
+ [Theory]
+ [InlineData(typeof(byte[]))]
+ [InlineData(typeof(string[]))]
+ [InlineData(typeof(object))]
+ [InlineData(typeof(CustomClassNoAttrib))] // no attrib, who knows?
+ [InlineData(typeof(CustomClassAttribFalse))] // attrib says explicitly no
+ [InlineData(typeof(CustomClassAttribTrue))] // attrib says explicitly true, but not sealed: we might have a sub-class
+ [InlineData(typeof(CustomNonBlittableStructNoAttrib))] // no attrib, who knows?
+ [InlineData(typeof(CustomNonBlittableStructAttribFalse))] // attrib says explicitly no
+ public void MutableTypes(Type type)
+ {
+ Assert.False((bool)typeof(DefaultHybridCache.ImmutableTypeCache<>).MakeGenericType(type)
+ .GetField(nameof(DefaultHybridCache.ImmutableTypeCache<string>.IsImmutable), BindingFlags.Static | BindingFlags.Public)!
+ .GetValue(null)!);
+ }
+
+ class CustomClassNoAttrib { }
+ [ImmutableObject(false)]
+ class CustomClassAttribFalse { }
+ [ImmutableObject(true)]
+ class CustomClassAttribTrue { }
+ [ImmutableObject(true)]
+ sealed class SealedCustomClassAttribTrue { }
+
+ struct CustomBlittableStruct(int x) { public int X => x; }
+ struct CustomNonBlittableStructNoAttrib(string x) { public string X => x; }
+ [ImmutableObject(false)]
+ struct CustomNonBlittableStructAttribFalse(string x) { public string X => x; }
+ [ImmutableObject(true)]
+ struct CustomNonBlittableStructAttribTrue(string x) { public string X => x; }
+}
diff --git a/src/Caching/SqlServer/src/DatabaseOperations.cs b/src/Caching/SqlServer/src/DatabaseOperations.cs
index 04544e600a42..0adbd9d54128 100644
--- a/src/Caching/SqlServer/src/DatabaseOperations.cs
+++ b/src/Caching/SqlServer/src/DatabaseOperations.cs
@@ -2,8 +2,10 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Buffers;
using System.Data;
using System.Linq;
+using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Data.SqlClient;
@@ -77,11 +79,24 @@ public void DeleteCacheItem(string key)
return GetCacheItem(key, includeValue: true);
}
- public async Task GetCacheItemAsync(string key, CancellationToken token = default(CancellationToken))
+ public bool TryGetCacheItem(string key, IBufferWriter<byte> destination)
+ {
+ return GetCacheItem(key, includeValue: true, destination: destination) is not null;
+ }
+
+ public Task<byte[]?> GetCacheItemAsync(string key, CancellationToken token = default(CancellationToken))
+ {
+ token.ThrowIfCancellationRequested();
+
+ return GetCacheItemAsync(key, includeValue: true, token: token);
+ }
+
+ public async Task<bool> TryGetCacheItemAsync(string key, IBufferWriter<byte> destination, CancellationToken token = default(CancellationToken))
{
token.ThrowIfCancellationRequested();
- return await GetCacheItemAsync(key, includeValue: true, token: token).ConfigureAwait(false);
+ var arr = await GetCacheItemAsync(key, includeValue: true, destination: destination, token: token).ConfigureAwait(false);
+ return arr is not null;
}
public void RefreshCacheItem(string key)
@@ -89,11 +104,11 @@ public void RefreshCacheItem(string key)
GetCacheItem(key, includeValue: false);
}
- public async Task RefreshCacheItemAsync(string key, CancellationToken token = default(CancellationToken))
+ public Task RefreshCacheItemAsync(string key, CancellationToken token = default(CancellationToken))
{
token.ThrowIfCancellationRequested();
- await GetCacheItemAsync(key, includeValue: false, token: token).ConfigureAwait(false);
+ return GetCacheItemAsync(key, includeValue: false, token: token);
}
public void DeleteExpiredCacheItems()
@@ -111,7 +126,7 @@ public void DeleteExpiredCacheItems()
}
}
- public void SetCacheItem(string key, byte[] value, DistributedCacheEntryOptions options)
+ public void SetCacheItem(string key, ArraySegment<byte> value, DistributedCacheEntryOptions options)
{
var utcNow = SystemClock.UtcNow;
@@ -149,7 +164,7 @@ public void SetCacheItem(string key, byte[] value, DistributedCacheEntryOptions
}
}
- public async Task SetCacheItemAsync(string key, byte[] value, DistributedCacheEntryOptions options, CancellationToken token = default(CancellationToken))
+ public async Task SetCacheItemAsync(string key, ArraySegment<byte> value, DistributedCacheEntryOptions options, CancellationToken token = default(CancellationToken))
{
token.ThrowIfCancellationRequested();
@@ -189,7 +204,7 @@ public void SetCacheItem(string key, byte[] value, DistributedCacheEntryOptions
}
}
- private byte[]? GetCacheItem(string key, bool includeValue)
+ private byte[]? GetCacheItem(string key, bool includeValue, IBufferWriter