
Commit

test HTTP connection reuse with new zstd fix
Andrey36652 committed Mar 8, 2025
1 parent 6f9d0ee · commit 79f5b45
Showing 1 changed file with 107 additions and 0 deletions.
tests/zstd.rs: 107 additions & 0 deletions
@@ -296,6 +296,113 @@ async fn test_chunked_fragmented_multiple_frames_in_one_chunk() {
assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);
}

#[tokio::test]
async fn test_connection_reuse_with_chunked_fragmented_multiple_frames_in_one_chunk() {
// Define constants for delay and timing margin
const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =
tokio::time::Duration::from_millis(1000); // 1-second delay
const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); // Margin for timing assertions

// We will record the peer addresses of each client request here
let peer_addrs = std::sync::Arc::new(std::sync::Mutex::new(Vec::<std::net::SocketAddr>::new()));
let peer_addrs_clone = peer_addrs.clone();

// Set up a low-level server that reuses the existing client connection, executing this callback for each request it receives
let server = server::low_level_with_response(move |_raw_request, client_socket| {
let peer_addrs = peer_addrs_clone.clone();
Box::new(async move {
// Split RESPONSE_CONTENT into two parts
let mid = RESPONSE_CONTENT.len() / 2;
let part1 = &RESPONSE_CONTENT[0..mid];
let part2 = &RESPONSE_CONTENT[mid..];

// Compress each part separately to create two ZSTD frames
let compressed_part1 = zstd_compress(part1.as_bytes());
let compressed_part2 = zstd_compress(part2.as_bytes());

// Concatenate the frames into a single chunk's data
let chunk_data = [compressed_part1.as_slice(), compressed_part2.as_slice()].concat();

// Calculate the chunk size in bytes
let chunk_size = chunk_data.len();

// Prepare the initial response part: headers + chunk size
let headers = [
COMPRESSED_RESPONSE_HEADERS, // e.g., "HTTP/1.1 200 OK\r\nContent-Encoding: zstd\r\n"
b"Transfer-Encoding: chunked\r\n\r\n", // Indicate chunked encoding
format!("{:x}\r\n", chunk_size).as_bytes(), // Chunk size in hex
]
.concat();

// Send headers + chunk size + chunk data
client_socket
.write_all([headers.as_slice(), &chunk_data].concat().as_slice())
.await
.expect("write_all failed");
client_socket.flush().await.expect("flush failed");

// Introduce a delay to simulate fragmentation
tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;

peer_addrs
.lock()
.unwrap()
.push(client_socket.peer_addr().unwrap());

// Send the CRLF ending the first chunk's data, then the zero-length final chunk and trailing CRLF that terminate the chunked body
client_socket
.write_all(b"\r\n0\r\n\r\n")
.await
.expect("write_all failed");
client_socket.flush().await.expect("flush failed");
})
});

let client = reqwest::Client::builder()
.pool_idle_timeout(std::time::Duration::from_secs(30))
.pool_max_idle_per_host(1)
.build()
.unwrap();

const NUMBER_OF_REQUESTS: usize = 5;

for _ in 0..NUMBER_OF_REQUESTS {
// Record the start time for delay verification
let start = tokio::time::Instant::now();

let res = client
.get(format!("http://{}/", server.addr()))
.send()
.await
.expect("Failed to get response");

// Verify the decompressed response matches the original content
assert_eq!(
res.text().await.expect("Failed to read text"),
RESPONSE_CONTENT
);
assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);
}

drop(client);

// Check that all peer addresses are the same
let peer_addrs = peer_addrs.lock().unwrap();
assert_eq!(
peer_addrs.len(),
NUMBER_OF_REQUESTS,
"Expected {} peer addresses, but got {}",
NUMBER_OF_REQUESTS,
peer_addrs.len()
);
let first_addr = peer_addrs[0];
assert!(
peer_addrs.iter().all(|addr| addr == &first_addr),
"All peer addresses should be the same, but found differences: {:?}",
peer_addrs
);
}

#[tokio::test]
async fn test_chunked_fragmented_response_1() {
const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =
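Note: the test above relies on a zstd_compress helper (along with RESPONSE_CONTENT and COMPRESSED_RESPONSE_HEADERS) defined earlier in tests/zstd.rs, outside this diff. A minimal sketch of what such a helper could look like, assuming the zstd crate's encode_all API; the actual helper in the repository may differ:

// Hypothetical sketch of the zstd_compress helper called above; the real one is
// defined earlier in tests/zstd.rs and may be implemented differently.
fn zstd_compress(data: &[u8]) -> Vec<u8> {
    // encode_all compresses the entire input into a single complete ZSTD frame,
    // which is why compressing the two halves of RESPONSE_CONTENT separately
    // produces the two frames the test packs into one chunk.
    zstd::encode_all(data, 0).expect("zstd compression failed")
}

Because reqwest's zstd support is feature-gated, running just this test would look something like cargo test --test zstd --features zstd (feature name assumed here).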
