diff --git a/Cargo.lock b/Cargo.lock
index 36c2c58b..d49149b0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1908,6 +1908,16 @@ dependencies = [
  "bytes 1.4.0",
 ]
 
+[[package]]
+name = "cache-redis"
+version = "1.0.0"
+dependencies = [
+ "actix-web",
+ "actix-web-lab",
+ "env_logger",
+ "redis",
+]
+
 [[package]]
 name = "casbin"
 version = "2.0.9"
@@ -5834,9 +5844,9 @@ dependencies = [
 
 [[package]]
 name = "redis"
-version = "0.23.0"
+version = "0.23.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ea8c51b5dc1d8e5fd3350ec8167f464ec0995e79f2e90a075b63371500d557f"
+checksum = "ff5d95dd18a4d76650f0c2607ed8ebdbf63baf9cb934e1c233cd220c694db1d7"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -5845,11 +5855,14 @@ dependencies = [
  "futures 0.3.28",
  "futures-util",
  "itoa 1.0.9",
+ "native-tls",
  "percent-encoding",
  "pin-project-lite 0.2.10",
  "ryu",
  "sha1_smol",
+ "socket2 0.4.9",
  "tokio 1.29.1",
+ "tokio-retry",
  "tokio-util 0.7.8",
  "url",
 ]
@@ -7946,6 +7959,17 @@ dependencies = [
  "tokio-sync",
 ]
 
+[[package]]
+name = "tokio-retry"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f"
+dependencies = [
+ "pin-project",
+ "rand 0.8.5",
+ "tokio 1.29.1",
+]
+
 [[package]]
 name = "tokio-rustls"
 version = "0.23.4"
diff --git a/Cargo.toml b/Cargo.toml
index d2564f91..9916c0a4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,6 +13,7 @@ members = [
    "basics/state",
    "basics/static-files",
    "basics/todo",
+   "cache/redis",
    "cors/backend",
    "data-factory",
    "databases/diesel",
diff --git a/cache/redis/Cargo.toml b/cache/redis/Cargo.toml
new file mode 100644
index 00000000..29f50352
--- /dev/null
+++ b/cache/redis/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "cache-redis"
+version = "1.0.0"
+publish = false
+authors = ["Jatutep Bunupuradah <bangbaew@gmail.com>"]
+edition = "2021"
+
+[dependencies]
+actix-web-lab.workspace = true
+actix-web.workspace = true
+env_logger.workspace = true
+redis = { version = "0.23.1", features = ["tls-native-tls"] }
diff --git a/cache/redis/README.md b/cache/redis/README.md
new file mode 100644
index 00000000..2192b378
--- /dev/null
+++ b/cache/redis/README.md
@@ -0,0 +1,34 @@
+# Redis Cache Middleware
+
+This project demonstrates a Redis-backed cache middleware that reads and writes cached responses synchronously.
+The application keeps working even if Redis is not running; caching is simply skipped in that case.
+
+
+## Setting up
+Configure the `REDIS_HOST` environment variable; if it is not set, the default `redis://localhost:6379` is used. TLS is supported via the `rediss://` scheme. The listening port can be overridden with `LISTEN_PORT` (default: `8080`).
+Run the server using `cargo run`.
+
+## Endpoints
+
+### `GET /fibonacci/{number}`
+
+To test the demo, send a GET request to `/fibonacci/{number}`, where `{number}` is a positive integer that fits in a `u64`. The Fibonacci number is computed with naive recursion, so larger inputs take noticeably longer, which is what makes the cache visible.
+
+## Request Directives
+
+- `Cache-Control: no-cache` bypasses the cache lookup and returns a freshly computed response while still storing it, so the `cache-status` header is always `miss`.
+- `Cache-Control: no-store` skips both the cache lookup and the cache insertion, so the response is always freshly computed and never stored. Both directives are exercised in the client sketch below.
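+
+The sketch below is only an illustration and not part of this example: it assumes the `awc` client crate is available and the server is running on the default port `8080`. It sends a request with a `Cache-Control: no-cache` header and prints the `cache-status` response header.
+
+```rust
+use actix_web::http::header::CACHE_CONTROL;
+
+#[actix_web::main]
+async fn main() {
+    let client = awc::Client::default();
+
+    // `no-cache` forces a fresh computation but still stores the result,
+    // so this request always reports `cache-status: miss`.
+    let res = client
+        .get("http://localhost:8080/fibonacci/40")
+        .insert_header((CACHE_CONTROL, "no-cache"))
+        .send()
+        .await
+        .unwrap();
+
+    println!("cache-status: {:?}", res.headers().get("cache-status"));
+}
+```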
+
+## Verify Redis Contents
+
+The first GET request to `/fibonacci/47` may take around 8 seconds to respond.
+If Redis is running and a connection was established, subsequent requests return the cached result almost immediately with a `cache-status: hit` header; note that the cached response is always served with content type `application/json`, regardless of the original response's content type.
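+
+Cache entries are keyed by the request path (plus the query string, when present). To inspect what was stored, read the key back, e.g. with `redis-cli GET /fibonacci/47`, or with a small program along the lines of this hedged sketch (it assumes the default local Redis instance and that `/fibonacci/47` has been requested at least once):
+
+```rust
+use redis::Commands;
+
+fn main() -> redis::RedisResult<()> {
+    let client = redis::Client::open("redis://localhost:6379")?;
+    let mut conn = client.get_connection()?;
+
+    // The middleware stores the raw response body under the request path.
+    let cached: Option<Vec<u8>> = conn.get("/fibonacci/47")?;
+    match cached {
+        Some(body) => println!("cached body: {}", String::from_utf8_lossy(&body)),
+        None => println!("no cache entry yet"),
+    }
+    Ok(())
+}
+```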
+
+## Known issues
+
+- Because the middleware opens a new Redis connection for every request, a remote Redis server can introduce significant overhead, and connection attempts may be slow or fail entirely. A connection timeout (see the sketch below) is one way to bound this.
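+
+A hedged sketch, reusing the example's `redis::Client`: `get_connection_with_timeout` makes the middleware give up quickly instead of stalling the request on a slow or unreachable server.
+
+```rust
+use std::time::Duration;
+
+// Returns a connection only if Redis answers quickly; otherwise the caller
+// can skip caching for this request instead of blocking on the network.
+fn try_connect(client: &redis::Client) -> Option<redis::Connection> {
+    client
+        .get_connection_with_timeout(Duration::from_millis(200))
+        .ok()
+}
+```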
+
+## Further implementations
+
+- Implement asynchronous insertion of cache responses (a possible approach is sketched after this list).
+- Explore using an in-memory datastore within the application process to reduce reliance on Redis.
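+
+For the asynchronous insertion item, here is a minimal sketch assuming the same `RedisClient` app data and `MAX_AGE` value as `main.rs`. The hypothetical helper pushes the blocking Redis write onto actix's blocking thread pool so the response is not delayed by the cache insert:
+
+```rust
+use actix_web::web;
+use redis::{Client as RedisClient, Commands};
+
+// Hypothetical helper: store the response body without delaying the client.
+fn spawn_cache_insert(client: RedisClient, key: String, body: Vec<u8>, max_age: usize) {
+    actix_web::rt::spawn(async move {
+        // `web::block` runs the synchronous Redis calls on the blocking pool.
+        let result = web::block(move || -> redis::RedisResult<()> {
+            let mut conn = client.get_connection()?;
+            conn.set_ex(key, body, max_age)
+        })
+        .await;
+
+        match result {
+            Ok(Ok(())) => println!("async cache insert success"),
+            Ok(Err(e)) => println!("async cache insert error: {e}"),
+            Err(e) => println!("blocking task error: {e}"),
+        }
+    });
+}
+```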
diff --git a/cache/redis/src/main.rs b/cache/redis/src/main.rs
new file mode 100644
index 00000000..bef115d5
--- /dev/null
+++ b/cache/redis/src/main.rs
@@ -0,0 +1,178 @@
+use std::env;
+
+use actix_web::{
+    body::{self, MessageBody},
+    dev::{ServiceRequest, ServiceResponse},
+    http::{
+        header::{CacheDirective, HeaderValue, CACHE_CONTROL, CACHE_STATUS, CONTENT_TYPE},
+        Method, StatusCode,
+    },
+    middleware, web, App, Error, HttpResponse, HttpServer, Responder,
+};
+use actix_web_lab::middleware::{from_fn, Next};
+use redis::{Client as RedisClient, Commands, RedisError};
+
+fn fib_recursive(n: u64) -> u64 {
+    if n <= 1 {
+        return n;
+    }
+    fib_recursive(n - 1) + fib_recursive(n - 2)
+}
+
+async fn an_expensive_function(n: web::Path<u64>) -> impl Responder {
+    let result = fib_recursive(n.into_inner());
+    HttpResponse::Ok().body(result.to_string())
+}
+
+#[actix_web::main]
+async fn main() -> std::result::Result<(), std::io::Error> {
+    env_logger::init();
+
+    let redis_client =
+        redis::Client::open(env::var("REDIS_HOST").unwrap_or("redis://localhost:6379".to_owned()))
+            .unwrap();
+
+    let listen_port = env::var("LISTEN_PORT").unwrap_or_else(|_| "8080".to_owned());
+    let listen_address = format!("0.0.0.0:{listen_port}");
+
+    println!("Server is listening at {}...", listen_address);
+    HttpServer::new(move || {
+        App::new()
+            .wrap(from_fn(cache_middleware))
+            .app_data(redis_client.to_owned())
+            .service(web::resource("/fibonacci/{n}").route(web::get().to(an_expensive_function)))
+            .wrap(middleware::Logger::default())
+    })
+    .bind(listen_address)?
+    .run()
+    .await?;
+
+    Ok(())
+}
+
+pub async fn cache_middleware(
+    req: ServiceRequest,
+    next: Next<impl MessageBody>,
+) -> Result<ServiceResponse<impl MessageBody>, Error> {
+    // Adjust cache expiry here
+    const MAX_AGE: usize = 86400;
+    let cache_max_age = format!("max-age={MAX_AGE}").parse::<HeaderValue>().unwrap();
+    // Derive the cache key from the request path and query string
+    let key = if req.query_string().is_empty() {
+        req.path().to_owned()
+    } else {
+        format!("{}?{}", req.path(), req.query_string())
+    };
+    println!("cache key: {key:?}");
+
+    // Get "Cache-Control" request header and get cache directive
+    let headers = req.headers().to_owned();
+    let cache_directive = match headers.get(CACHE_CONTROL) {
+        Some(cache_control_header) => cache_control_header.to_str().unwrap_or(""),
+        None => "",
+    };
+
+    // If the client sent neither "no-cache" nor "no-store", and the path is not the excluded "/metrics" path
+    if cache_directive != CacheDirective::NoCache.to_string()
+        && cache_directive != CacheDirective::NoStore.to_string()
+        && key != "/metrics"
+    {
+        // Initialize Redis Client from App Data
+        let redis_client = req.app_data::<RedisClient>();
+        // This should always be Some, so let's unwrap
+        let mut redis_conn = redis_client.unwrap().get_connection();
+        let redis_ok = redis_conn.is_ok();
+
+        // If Redis connection succeeded and request method is GET
+        if redis_ok && req.method() == Method::GET {
+            // Unwrap the connection
+            let redis_conn = redis_conn.as_mut().unwrap();
+
+            // Try to get the cached response by defined key
+            let cached_response: Result<Vec<u8>, RedisError> = redis_conn.get(key.to_owned());
+            if let Err(e) = cached_response {
+                // If the cache lookup failed (e.g. connection or deserialization error)
+                println!("cache get error: {}", e);
+            } else if cached_response.as_ref().unwrap().is_empty() {
+                // An empty value means no cache entry was found for this key
+                println!("cache not found");
+            } else {
+                // If cache is found
+                println!("cache found");
+
+                // Prepare response body
+                let res = HttpResponse::new(StatusCode::OK).set_body(cached_response.unwrap());
+                let mut res = ServiceResponse::new(req.request().to_owned(), res);
+
+                // Define content-type and headers here
+                res.headers_mut()
+                    .append(CONTENT_TYPE, HeaderValue::from_static("application/json"));
+                res.headers_mut().append(CACHE_CONTROL, cache_max_age);
+                res.headers_mut()
+                    .append(CACHE_STATUS, HeaderValue::from_static("hit"));
+
+                return Ok(res);
+            }
+        }
+    }
+
+    // If the Redis connection failed, caching was skipped, or no cached
+    // response was found, call the next service in the chain
+    let res = next.call(req).await?;
+
+    // deconstruct response into parts
+    let (req, res) = res.into_parts();
+    let (res, body) = res.into_parts();
+
+    // Convert body to Bytes
+    let body = body::to_bytes(body).await.ok().unwrap();
+    // Use bytes directly for caching instead of converting to a String
+    let res_body_enc = body.to_vec();
+
+    // Prepare response body
+    let res = res.set_body(res_body_enc.to_owned());
+    let mut res = ServiceResponse::new(req.to_owned(), res);
+
+    // If a GET request succeeded, the cache directive is not "no-store", and the path is not excluded
+    if req.method() == Method::GET
+        && res.status().is_success()
+        && cache_directive != CacheDirective::NoStore.to_string()
+        && key != "/metrics"
+    {
+        // Define response headers here
+        res.headers_mut().append(CACHE_CONTROL, cache_max_age);
+        res.headers_mut()
+            .append(CACHE_STATUS, HeaderValue::from_static("miss"));
+
+        // Initialize Redis Client from App Data
+        let redis_client = req.app_data::<RedisClient>();
+        // This should always be Some, so let's unwrap
+        let redis_conn = redis_client.unwrap().get_connection();
+        let redis_ok = redis_conn.is_ok();
+
+        // If Redis connection succeeded
+        if redis_ok {
+            // Try to insert the response body into Redis
+            let mut redis_conn = redis_conn.unwrap();
+            let insert = redis::Cmd::set_ex(key, res_body_enc, MAX_AGE);
+            // Or keep the cache forever:
+            // let insert = redis::Cmd::set(key, res_body_enc);
+            let insert = insert.query::<String>(&mut redis_conn);
+
+            if let Err(e) = insert {
+                // If cache insertion failed
+                println!("cache insert error: {}", e);
+            } else {
+                // This should print "cache insert success: OK"
+                println!("cache insert success: {}", insert.unwrap());
+            }
+        } else if let Err(e) = redis_conn {
+            // If Redis connection failed
+            println!("RedisError: {}", e);
+        }
+    } else {
+        // If the request method is not GET, the response was not successful, or the cache directive is "no-store"
+        println!("not inserting cache");
+    }
+    Ok(res)
+}