From b6cf3de5581a50e2665d9ea3f1fc4249ca2dceec Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 20 Aug 2025 08:51:02 -0400 Subject: [PATCH 01/26] update dependabot Signed-off-by: Austin Abro --- .github/dependabot.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 1557067..6c47c66 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -3,8 +3,8 @@ updates: - package-ecosystem: github-actions directory: / schedule: - interval: daily - - package-ecosystem: cargo - directory: / - schedule: - interval: daily + interval: weekly + groups: + github-actions: + patterns: + - "actions/*" \ No newline at end of file From 96978357859c8bf0ad7bc9fef410767ba85e5340 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 20 Aug 2025 10:25:19 -0400 Subject: [PATCH 02/26] better name Signed-off-by: Austin Abro --- .github/dependabot.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 6c47c66..c130433 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -5,6 +5,6 @@ updates: schedule: interval: weekly groups: - github-actions: + actions-organization: patterns: - "actions/*" \ No newline at end of file From b8feadc127306b436b3cb3c49325247c01026783 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 20 Aug 2025 10:25:28 -0400 Subject: [PATCH 03/26] better name Signed-off-by: Austin Abro --- .github/dependabot.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index c130433..9666da3 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -4,7 +4,7 @@ updates: directory: / schedule: interval: weekly - groups: - actions-organization: - patterns: - - "actions/*" \ No newline at end of file + groups: + actions-organization: + patterns: + - "actions/*" \ No newline at end of file From 
0c109ac019363f7e4cdfa01a34df179a4519419a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 20 Aug 2025 10:36:21 -0400 Subject: [PATCH 04/26] move groups to the right level Signed-off-by: Austin Abro --- .github/dependabot.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 9666da3..c130433 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -4,7 +4,7 @@ updates: directory: / schedule: interval: weekly - groups: - actions-organization: - patterns: - - "actions/*" \ No newline at end of file + groups: + actions-organization: + patterns: + - "actions/*" \ No newline at end of file From 0ee6c7f6ad3130263fa12bc51e159273b1d19423 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 15 Sep 2025 09:39:36 -0400 Subject: [PATCH 05/26] readme blurb Signed-off-by: Austin Abro --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 1ade677..c970e50 100644 --- a/README.md +++ b/README.md @@ -72,3 +72,7 @@ Build your injector by following the steps above then run the following the `tes zarf package create zarf init --confirm ``` + +## Automatic dependency updates + +Dependabot automatically creates pull requests to the zarf-injector repository if there is a security vulnerability in one of our dependencies. Aside from these updates, this repo does not a system to automatically update cargo dependencies. The injector is small and simple and therefore unlikely to see a practical improvement from a dependency update. Additionally, consistently updating the dependencies could inch us closer to the 1MiB limit. Ensuring we have some leeway from the limit makes it easier to update the injector in the event of a security vulnerability or additional required feature. 
\ No newline at end of file From ddaac5a11375d31c88431ede97d55167d096337a Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Mon, 15 Sep 2025 09:40:35 -0400 Subject: [PATCH 06/26] fix grammar Signed-off-by: Austin Abro --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c970e50..4ebe1da 100644 --- a/README.md +++ b/README.md @@ -75,4 +75,4 @@ zarf init --confirm ## Automatic dependency updates -Dependabot automatically creates pull requests to the zarf-injector repository if there is a security vulnerability in one of our dependencies. Aside from these updates, this repo does not a system to automatically update cargo dependencies. The injector is small and simple and therefore unlikely to see a practical improvement from a dependency update. Additionally, consistently updating the dependencies could inch us closer to the 1MiB limit. Ensuring we have some leeway from the limit makes it easier to update the injector in the event of a security vulnerability or additional required feature. \ No newline at end of file +Dependabot automatically creates pull requests to the zarf-injector repository if there is a security vulnerability in one of our dependencies. Aside from these updates, this repo does not have a system to automatically update cargo dependencies. The injector is small and simple and therefore unlikely to see a practical improvement from a dependency update. Additionally, consistently updating the dependencies could inch us closer to the 1MiB limit. Ensuring we have some leeway from the limit makes it easier to update the injector in the event of a security vulnerability or additional required feature. 
\ No newline at end of file From ea91960307d71d0e5d1b468e499da09ce1636cc9 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 1 Oct 2025 10:29:46 -0400 Subject: [PATCH 07/26] WIP, add push Signed-off-by: Austin Abro --- Makefile | 2 +- src/main.rs | 594 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 591 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index d3919a0..56b06c5 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ check-size: injector ## Validate that both injector binaries are under 1 MiB unit-test: ## Run cargo tests - cargo test + cargo test -- --test-threads=1 target/x86_64-unknown-linux-musl/release/zarf-injector: src/main.rs Cargo.toml rustup target add x86_64-unknown-linux-musl diff --git a/src/main.rs b/src/main.rs index 3898df6..8a4257a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,8 +12,8 @@ use std::path::PathBuf; use axum::{ Router, body::Body, - extract::Path, - http::StatusCode, + extract::{Path, Request}, + http::{StatusCode, HeaderMap}, response::{IntoResponse, Response}, routing::get, }; @@ -119,7 +119,11 @@ fn start_seed_registry() -> Router { // The name and reference parameter identify the image // The reference may include a tag or digest. 
Router::new() - .route("/v2/*path", get(handler)) + .route("/v2/*path", get(handler) + .put(put_handler) + .head(head_handler) + .post(post_handler) + .patch(patch_handler)) .route( "/v2/", get(|| async { @@ -180,9 +184,19 @@ async fn handle_get_manifest(name: String, reference: String) -> Response { let json: Value = serde_json::from_str(&index).expect("unable to parse index.json"); let mut sha_manifest: String = "".to_owned(); + let mut media_type = OCI_MIME_TYPE.to_string(); if reference.starts_with("sha256:") { sha_manifest = reference.strip_prefix("sha256:").unwrap().to_owned(); + // Find media type from index + for manifest in json["manifests"].as_array().unwrap_or(&vec![]) { + if let Some(digest) = manifest["digest"].as_str() { + if digest == format!("sha256:{}", sha_manifest) { + media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + break; + } + } + } } else { for manifest in json["manifests"].as_array().unwrap() { let image_base_name = manifest["annotations"]["org.opencontainers.image.base.name"] @@ -196,6 +210,8 @@ async fn handle_get_manifest(name: String, reference: String) -> Response { .strip_prefix("sha256:") .unwrap() .to_owned(); + media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + break; } } } @@ -221,7 +237,7 @@ async fn handle_get_manifest(name: String, reference: String) -> Response { let stream = ReaderStream::new(file); Response::builder() .status(StatusCode::OK) - .header("Content-Type", OCI_MIME_TYPE) + .header("Content-Type", media_type) .header("Content-Length", metadata.len()) .header( "Docker-Content-Digest", @@ -270,6 +286,448 @@ async fn handle_get_digest(tag: String) -> Response { } } +async fn put_handler(Path(path): Path, headers: HeaderMap, request: Request) -> Response { + let query_string = request.uri().query().unwrap_or(""); + println!("PUT request: {} query: {}", path, query_string); + let manifest_re = Regex::new("(.+)/manifests/(.+)").unwrap(); + let blob_re = 
Regex::new("(.+)/blobs/uploads/(.+)").unwrap(); + + if manifest_re.is_match(&path) { + let caps = manifest_re.captures(&path).unwrap(); + let name = caps.get(1).unwrap().as_str().to_string(); + let reference = caps.get(2).unwrap().as_str().to_string(); + handle_put_manifest(name, reference, headers, request).await + } else if blob_re.is_match(&path) { + let caps = blob_re.captures(&path).unwrap(); + let upload_id = caps.get(2).unwrap().as_str().to_string(); + handle_put_blob(upload_id, headers, query_string.to_string(), request).await + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body("Not Found".to_string()) + .unwrap() + .into_response() + } +} + +async fn post_handler(Path(path): Path) -> Response { + println!("POST request: {}", path); + let blob_upload_re = Regex::new("(.+)/blobs/uploads/?$").unwrap(); + + if blob_upload_re.is_match(&path) { + handle_post_blob_upload(path).await + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body("Not Found".to_string()) + .unwrap() + .into_response() + } +} + +async fn head_handler(Path(path): Path) -> Response { + println!("HEAD request: {}", path); + let manifest_re = Regex::new("(.+)/manifests/(.+)").unwrap(); + let blob_re = Regex::new(".+/blobs/(.+)").unwrap(); + + if manifest_re.is_match(&path) { + let caps = manifest_re.captures(&path).unwrap(); + let name = caps.get(1).unwrap().as_str().to_string(); + let reference = caps.get(2).unwrap().as_str().to_string(); + handle_head_manifest(name, reference).await + } else if blob_re.is_match(&path) { + let caps = blob_re.captures(&path).unwrap(); + let digest = caps.get(1).unwrap().as_str().to_string(); + handle_head_blob(digest).await + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .unwrap() + } +} + +async fn patch_handler(Path(path): Path, request: Request) -> Response { + println!("PATCH request: {}", path); + let blob_re = Regex::new("(.+)/blobs/uploads/(.+)").unwrap(); + + if 
blob_re.is_match(&path) { + let caps = blob_re.captures(&path).unwrap(); + let upload_id = caps.get(2).unwrap().as_str().to_string(); + handle_patch_blob(upload_id, request).await + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body("Not Found".to_string()) + .unwrap() + .into_response() + } +} + +async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap, request: Request) -> Response { + let root = PathBuf::from( + std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), + ); + + // Read the body + let body_bytes = match axum::body::to_bytes(request.into_body(), usize::MAX).await { + Ok(bytes) => bytes, + Err(_) => { + return Response::builder() + .status(StatusCode::BAD_REQUEST) + .body("Failed to read body".into()) + .unwrap(); + } + }; + + // Calculate digest + let mut hasher = Sha256::new(); + hasher.update(&body_bytes); + let digest = hasher.finalize(); + let digest_str = format!("sha256:{}", digest.encode_hex::()); + + // Verify digest if provided + if let Some(expected_digest) = headers.get("Docker-Content-Digest") { + if expected_digest.to_str().unwrap() != digest_str { + return Response::builder() + .status(StatusCode::BAD_REQUEST) + .body("Digest mismatch".into()) + .unwrap(); + } + } + + // Write manifest to blobs + let blob_path = root.join("blobs").join("sha256").join(digest_str.strip_prefix("sha256:").unwrap()); + if let Err(_) = tokio::fs::create_dir_all(blob_path.parent().unwrap()).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to create directory".into()) + .unwrap(); + } + + if let Err(_) = tokio::fs::write(&blob_path, &body_bytes).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to write manifest".into()) + .unwrap(); + } + + // Update index.json + let index_path = root.join("index.json"); + let mut index: Value = match tokio::fs::read_to_string(&index_path).await { + 
Ok(content) => serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({ + "schemaVersion": 2, + "manifests": [] + })), + Err(_) => serde_json::json!({ + "schemaVersion": 2, + "manifests": [] + }), + }; + + // Parse the manifest to get its mediaType + let manifest_media_type = if let Ok(manifest_json) = serde_json::from_slice::(&body_bytes) { + manifest_json.get("mediaType") + .and_then(|v| v.as_str()) + .unwrap_or(OCI_MIME_TYPE) + .to_string() + } else { + OCI_MIME_TYPE.to_string() + }; + + // Add or update manifest entry + let image_name = format!("{}:{}", name, reference); + let manifest_entry = serde_json::json!({ + "mediaType": manifest_media_type, + "digest": digest_str, + "size": body_bytes.len(), + "annotations": { + "org.opencontainers.image.base.name": image_name + } + }); + + if let Some(manifests) = index["manifests"].as_array_mut() { + // Remove existing entry with same name if it exists + manifests.retain(|m| { + m["annotations"]["org.opencontainers.image.base.name"].as_str() != Some(&image_name) + }); + manifests.push(manifest_entry); + } + + if let Err(_) = tokio::fs::write(&index_path, serde_json::to_string_pretty(&index).unwrap()).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to update index".into()) + .unwrap(); + } + + Response::builder() + .status(StatusCode::CREATED) + .header("Docker-Content-Digest", digest_str.clone()) + .header("Location", format!("/v2/{}/manifests/{}", name, digest_str)) + .header("Docker-Distribution-Api-Version", "registry/2.0") + .body(Body::empty()) + .unwrap() + +} + +async fn handle_post_blob_upload(path: String) -> Response { + // Generate a simple unique ID for the upload session using timestamp and process id + use std::time::{SystemTime, UNIX_EPOCH}; + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + let pid = std::process::id(); + let upload_id = format!("{}-{}", timestamp, pid); + let location = 
format!("/v2/{}/{}", path.trim_end_matches('/'), upload_id); + + Response::builder() + .status(StatusCode::ACCEPTED) + .header("Location", location) + .header("Docker-Upload-UUID", upload_id) + .header("Range", "0-0") + .header("Docker-Distribution-Api-Version", "registry/2.0") + .body(Body::empty()) + .unwrap() +} + +async fn handle_put_blob(upload_id: String, headers: HeaderMap, query_string: String, request: Request) -> Response { + let root = PathBuf::from( + std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), + ); + + // Read the body bytes from the request + let request_body_bytes = match axum::body::to_bytes(request.into_body(), usize::MAX).await { + Ok(bytes) => bytes, + Err(_) => { + return Response::builder() + .status(StatusCode::BAD_REQUEST) + .body("Failed to read body".into()) + .unwrap(); + } + }; + + // Try to read from temporary file first (from PATCH), otherwise use request body + let temp_path = root.join(".uploads").join(&upload_id); + let body_bytes = if temp_path.exists() { + match tokio::fs::read(&temp_path).await { + Ok(bytes) => bytes, + Err(_) => request_body_bytes.to_vec(), + } + } else { + request_body_bytes.to_vec() + }; + + // Extract digest from query parameter (e.g., digest=sha256:abc123) + // Note: query parameter is URL-encoded, so we need to decode it + let digest_str = if query_string.contains("digest=") { + let encoded = query_string + .split('&') + .find(|param| param.starts_with("digest=")) + .and_then(|param| param.strip_prefix("digest=")) + .unwrap_or(""); + // Simple URL decode for the colon + encoded.replace("%3A", ":").replace("%3a", ":") + } else if let Some(digest) = headers.get("Docker-Content-Digest") { + digest.to_str().unwrap().to_string() + } else { + // Calculate digest + let mut hasher = Sha256::new(); + hasher.update(&body_bytes); + let digest = hasher.finalize(); + format!("sha256:{}", digest.encode_hex::()) + }; + + if digest_str.is_empty() { + return Response::builder() + 
.status(StatusCode::BAD_REQUEST) + .body("Missing digest".into()) + .unwrap(); + } + + // Verify digest + let mut hasher = Sha256::new(); + hasher.update(&body_bytes); + let actual_digest = format!("sha256:{}", hasher.finalize().encode_hex::()); + + if digest_str != actual_digest { + return Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(format!("Digest mismatch: expected {} got {}", digest_str, actual_digest).into()) + .unwrap(); + } + + // Write blob + let blob_path = root.join("blobs").join("sha256").join(digest_str.strip_prefix("sha256:").unwrap()); + if let Err(_) = tokio::fs::create_dir_all(blob_path.parent().unwrap()).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to create directory".into()) + .unwrap(); + } + + if let Err(_) = tokio::fs::write(&blob_path, &body_bytes).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to write blob".into()) + .unwrap(); + } + + // Clean up temporary file + if temp_path.exists() { + let _ = tokio::fs::remove_file(&temp_path).await; + } + + Response::builder() + .status(StatusCode::CREATED) + .header("Docker-Content-Digest", digest_str.clone()) + .header("Location", format!("/v2/blobs/{}", digest_str)) + .header("Docker-Distribution-Api-Version", "registry/2.0") + .body(Body::empty()) + .unwrap() +} + +async fn handle_head_manifest(name: String, reference: String) -> Response { + let root = PathBuf::from( + std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), + ); + + let index = match fs::read_to_string(root.join("index.json")) { + Ok(content) => content, + Err(_) => { + return Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .unwrap(); + } + }; + + let json: Value = match serde_json::from_str(&index) { + Ok(j) => j, + Err(_) => { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(Body::empty()) + .unwrap(); + } + }; + 
+ let mut sha_manifest: String = "".to_owned(); + let mut media_type = OCI_MIME_TYPE.to_string(); + + if reference.starts_with("sha256:") { + sha_manifest = reference.strip_prefix("sha256:").unwrap().to_owned(); + // Find media type from index + for manifest in json["manifests"].as_array().unwrap_or(&vec![]) { + if let Some(digest) = manifest["digest"].as_str() { + if digest == format!("sha256:{}", sha_manifest) { + media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + break; + } + } + } + } else { + for manifest in json["manifests"].as_array().unwrap_or(&vec![]) { + if let Some(image_base_name) = manifest["annotations"]["org.opencontainers.image.base.name"].as_str() { + let requested_reference = format!("{}:{}", name, reference); + if requested_reference == image_base_name { + if let Some(digest) = manifest["digest"].as_str() { + sha_manifest = digest.strip_prefix("sha256:").unwrap_or(digest).to_owned(); + } + media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + break; + } + } + } + } + + if sha_manifest.is_empty() { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .unwrap() + } else { + let file_path = root.join("blobs").join("sha256").join(&sha_manifest); + match fs::metadata(&file_path) { + Ok(metadata) => Response::builder() + .status(StatusCode::OK) + .header("Content-Type", media_type) + .header("Content-Length", metadata.len()) + .header("Docker-Content-Digest", format!("sha256:{}", sha_manifest)) + .header("Docker-Distribution-Api-Version", "registry/2.0") + .body(Body::empty()) + .unwrap(), + Err(_) => Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .unwrap(), + } + } +} + +async fn handle_head_blob(digest: String) -> Response { + let root = PathBuf::from( + std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), + ); + let blob_path = 
root.join("blobs").join("sha256").join(digest.strip_prefix("sha256:").unwrap()); + + match fs::metadata(&blob_path) { + Ok(metadata) => Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "application/octet-stream") + .header("Content-Length", metadata.len()) + .header("Docker-Content-Digest", digest.clone()) + .header("Docker-Distribution-Api-Version", "registry/2.0") + .body(Body::empty()) + .unwrap(), + Err(_) => Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .unwrap(), + } +} + +async fn handle_patch_blob(upload_id: String, request: Request) -> Response { + let root = PathBuf::from( + std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), + ); + + // Read the body + let body_bytes = match axum::body::to_bytes(request.into_body(), usize::MAX).await { + Ok(bytes) => bytes, + Err(_) => { + return Response::builder() + .status(StatusCode::BAD_REQUEST) + .body("Failed to read body".into()) + .unwrap(); + } + }; + + // Store the upload in a temporary location + let temp_dir = root.join(".uploads"); + if let Err(_) = tokio::fs::create_dir_all(&temp_dir).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to create temp directory".into()) + .unwrap(); + } + + let temp_path = temp_dir.join(&upload_id); + if let Err(_) = tokio::fs::write(&temp_path, &body_bytes).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to write temp file".into()) + .unwrap(); + } + + let current_size = body_bytes.len(); + + Response::builder() + .status(StatusCode::ACCEPTED) + .header("Range", format!("0-{}", current_size)) + .header("Docker-Distribution-Api-Version", "registry/2.0") + .body(Body::empty()) + .unwrap() +} + #[tokio::main(flavor = "current_thread")] async fn main() { let args: Vec = env::args().collect(); @@ -434,6 +892,134 @@ mod test { .expect("should have cleaned up the pulled test image"); } + 
#[tokio::test] + async fn test_push_integration() { + let docker = Docker::connect_with_socket_defaults() + .expect("should have been able to create a Docker client"); + + // Create a temporary directory that will auto-cleanup on drop + let tmpdir = TempDir::new().expect("should have created temporary directory"); + + let env = TestEnv::new(docker.clone(), TEST_IMAGE, tmpdir.path()) + .await + .expect("should have setup the test environment"); + + let output_root = env.output_dir(); + let _init_guard = EnvGuard::new( + "ZARF_INJECTOR_INIT_ROOT", + &env.input_dir().to_string_lossy(), + ); + let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); + unpack(&env.shasum()); + + localize_test_image(TEST_IMAGE, &output_root) + .expect("should have localized the test image's index.json"); + + // Use :0 to let the operating system decide the random port to listen on + let listener = tokio::net::TcpListener::bind("127.0.0.1:0") + .await + .expect("should have been able to bind listener to a random port on localhost"); + let random_port = listener + .local_addr() + .expect("should have been able to resolve the address") + .port(); + + // Start registry in the background + tokio::spawn(async { + let app = start_seed_registry(); + axum::serve(listener, app) + .await + .expect("should have been able to start serving the registry"); + }); + + // Wait for registry to be ready + for _ in 0..10 { + if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) + .await + .is_ok() + { + break; + } + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + } + + let test_image = TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{random_port}")); + + // Pull the original image first + let pull_options = Some(CreateImageOptions { + from_image: test_image.clone(), + ..Default::default() + }); + docker + .create_image(pull_options, None, None) + .try_collect::>() + .await + .expect("should have pulled test image"); + + // Tag it with 
a new name for pushing + let pushed_image = format!("127.0.0.1:{random_port}/zarf-dev/doom-game:pushed-test"); + docker + .tag_image( + &test_image, + Some(bollard::image::TagImageOptions { + repo: format!("127.0.0.1:{random_port}/zarf-dev/doom-game"), + tag: "pushed-test".to_string(), + }), + ) + .await + .expect("should have tagged image"); + + // Push the image to the registry + use bollard::image::PushImageOptions; + let push_result = docker + .push_image( + &format!("127.0.0.1:{random_port}/zarf-dev/doom-game:pushed-test"), + Some(PushImageOptions { + tag: "pushed-test".to_string(), + ..Default::default() + }), + None, + ) + .try_collect::>() + .await; + if let Err(ref e) = push_result { + eprintln!("Push error: {:?}", e); + } + assert!(push_result.is_ok(), "should have pushed image to registry: {:?}", push_result); + + // Verify we can pull it back with the new tag + docker + .remove_image(&pushed_image, None, None) + .await + .expect("should have removed local copy"); + + let verify_pull = docker + .create_image( + Some(CreateImageOptions { + from_image: pushed_image.clone(), + ..Default::default() + }), + None, + None, + ) + .try_collect::>() + .await; + if let Err(ref e) = verify_pull { + eprintln!("Pull back error: {:?}", e); + } + assert!(verify_pull.is_ok(), "should have pulled pushed image back: {:?}", verify_pull); + + // Cleanup + docker + .remove_image(&test_image, None, None) + .await + .expect("should have cleaned up test image"); + docker + .remove_image(&pushed_image, None, None) + .await + .expect("should have cleaned up pushed image"); + } + // This localizes the test image's index.json such that the registry server // will be able to match the test image from it fn localize_test_image(image_reference: &str, image_root: &Path) -> Result<()> { From 4905b59d5c0e143b9e003a799947360bcb8494f0 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 1 Oct 2025 10:50:21 -0400 Subject: [PATCH 08/26] add push Signed-off-by: Austin Abro --- Cargo.toml | 4 
+- src/main.rs | 336 ++++++++++++++++++---------------------------------- 2 files changed, 118 insertions(+), 222 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 08e5cc3..c6d646c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,8 +6,6 @@ name = "zarf-injector" edition = "2024" [dependencies] -flate2 = "1.1.2" -tar = "0.4.40" sha2 = "0.10.8" hex = { version = "0.4.3", default-features = false } serde_json = { version = "1.0.113", default-features = false, features = [ @@ -21,7 +19,9 @@ regex-lite = "0.1.5" [dev-dependencies] anyhow = "1.0.86" bollard = "0.17.1" +flate2 = "1.1.2" futures-util = "0.3.30" +tar = "0.4.40" [profile.release] opt-level = "z" # Optimize for size. diff --git a/src/main.rs b/src/main.rs index 8a4257a..76c20e0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,112 +3,24 @@ use std::env; use std::fs; -use std::fs::File; -use std::io; -use std::io::Read; -use std::io::Write; use std::path::PathBuf; use axum::{ Router, body::Body, extract::{Path, Request}, - http::{StatusCode, HeaderMap}, + http::{HeaderMap, StatusCode}, response::{IntoResponse, Response}, routing::get, }; -use flate2::read::GzDecoder; use hex::ToHex; use regex_lite::Regex; use serde_json::Value; use sha2::{Digest, Sha256}; -use tar::Archive; use tokio_util::io::ReaderStream; const OCI_MIME_TYPE: &str = "application/vnd.oci.image.manifest.v1+json"; -// Reads the binary contents of a file -fn get_file(path: &PathBuf) -> io::Result> { - // open the file - let mut f = File::open(path)?; - // create an empty buffer - let mut buffer = Vec::new(); - - // read the whole file - match f.read_to_end(&mut buffer) { - Ok(_) => Ok(buffer), - Err(e) => Err(e), - } -} - -// Merges all given files into one buffer -fn collect_binary_data(paths: &Vec) -> io::Result> { - // create an empty buffer - let mut buffer = Vec::new(); - - // add contents of all files in paths to buffer - for path in paths { - println!("Processing {}", path.display()); - let new_content = get_file(path); - buffer - 
.write_all(&new_content.unwrap()) - .expect("Could not add the file contents to the merged file buffer"); - } - - Ok(buffer) -} - -/// Unpacks the zarf-payload-* configmaps back into a tarball, then unpacks into ./zarf-seed -/// -/// Inspired by https://medium.com/@nlauchande/rust-coding-up-a-simple-concatenate-files-tool-and-first-impressions-a8cbe680e887 -fn unpack(sha_sum: &String) { - let init_root = - std::env::var("ZARF_INJECTOR_INIT_ROOT").unwrap_or_else(|_| String::from("/zarf-init")); - let seed_root = - std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")); - // get the list of file matches to merge - let entries = std::fs::read_dir(init_root).expect("failed to read from init directory"); - let mut file_partials: Vec = entries - // Filter out any entries that were errors - .filter_map(|entry| entry.ok()) - // Check that the entry is a file - .filter(|entry| entry.metadata().is_ok_and(|e| e.is_file())) - // Check that the entry's file name starts with zarf-injector- - .filter(|entry| { - entry - .file_name() - .to_str() - .is_some_and(|name| name.starts_with("zarf-payload-")) - }) - // Turn each entry in to a file path - .map(|entry| entry.path()) - .collect(); - - // ensure a default sort-order - file_partials.sort(); - - // get a buffer of the final merged file contents - let contents = collect_binary_data(&file_partials).unwrap(); - - // create a Sha256 object - let mut hasher = Sha256::new(); - - // write input message - hasher.update(&contents); - - // read hash digest and consume hasher - let result = hasher.finalize(); - let result_string = result.encode_hex::(); - assert_eq!(*sha_sum, result_string); - - // write the merged file to disk and extract it - let tar = GzDecoder::new(&contents[..]); - let mut archive = Archive::new(tar); - archive - .unpack(seed_root) - .expect("Unable to unarchive the resulting tarball"); -} - -/// Starts a static docker compliant registry server that only serves the single image from 
the CWD +/// Starts a docker compliant registry server that serves images from the seed directory /// /// (which is a OCI image layout): /// @@ -119,11 +31,14 @@ fn start_seed_registry() -> Router { // The name and reference parameter identify the image // The reference may include a tag or digest. Router::new() - .route("/v2/*path", get(handler) - .put(put_handler) - .head(head_handler) - .post(post_handler) - .patch(patch_handler)) + .route( + "/v2/*path", + get(handler) + .put(put_handler) + .head(head_handler) + .post(post_handler) + .patch(patch_handler), + ) .route( "/v2/", get(|| async { @@ -192,7 +107,10 @@ async fn handle_get_manifest(name: String, reference: String) -> Response { for manifest in json["manifests"].as_array().unwrap_or(&vec![]) { if let Some(digest) = manifest["digest"].as_str() { if digest == format!("sha256:{}", sha_manifest) { - media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + media_type = manifest["mediaType"] + .as_str() + .unwrap_or(OCI_MIME_TYPE) + .to_string(); break; } } @@ -210,7 +128,10 @@ async fn handle_get_manifest(name: String, reference: String) -> Response { .strip_prefix("sha256:") .unwrap() .to_owned(); - media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + media_type = manifest["mediaType"] + .as_str() + .unwrap_or(OCI_MIME_TYPE) + .to_string(); break; } } @@ -364,7 +285,12 @@ async fn patch_handler(Path(path): Path, request: Request) -> Response { } } -async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap, request: Request) -> Response { +async fn handle_put_manifest( + name: String, + reference: String, + headers: HeaderMap, + request: Request, +) -> Response { let root = PathBuf::from( std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), ); @@ -397,7 +323,10 @@ async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap } // Write manifest to blobs - let blob_path = 
root.join("blobs").join("sha256").join(digest_str.strip_prefix("sha256:").unwrap()); + let blob_path = root + .join("blobs") + .join("sha256") + .join(digest_str.strip_prefix("sha256:").unwrap()); if let Err(_) = tokio::fs::create_dir_all(blob_path.parent().unwrap()).await { return Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) @@ -415,10 +344,12 @@ async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap // Update index.json let index_path = root.join("index.json"); let mut index: Value = match tokio::fs::read_to_string(&index_path).await { - Ok(content) => serde_json::from_str(&content).unwrap_or_else(|_| serde_json::json!({ - "schemaVersion": 2, - "manifests": [] - })), + Ok(content) => serde_json::from_str(&content).unwrap_or_else(|_| { + serde_json::json!({ + "schemaVersion": 2, + "manifests": [] + }) + }), Err(_) => serde_json::json!({ "schemaVersion": 2, "manifests": [] @@ -426,14 +357,16 @@ async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap }; // Parse the manifest to get its mediaType - let manifest_media_type = if let Ok(manifest_json) = serde_json::from_slice::(&body_bytes) { - manifest_json.get("mediaType") - .and_then(|v| v.as_str()) - .unwrap_or(OCI_MIME_TYPE) - .to_string() - } else { - OCI_MIME_TYPE.to_string() - }; + let manifest_media_type = + if let Ok(manifest_json) = serde_json::from_slice::(&body_bytes) { + manifest_json + .get("mediaType") + .and_then(|v| v.as_str()) + .unwrap_or(OCI_MIME_TYPE) + .to_string() + } else { + OCI_MIME_TYPE.to_string() + }; // Add or update manifest entry let image_name = format!("{}:{}", name, reference); @@ -454,7 +387,9 @@ async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap manifests.push(manifest_entry); } - if let Err(_) = tokio::fs::write(&index_path, serde_json::to_string_pretty(&index).unwrap()).await { + if let Err(_) = + tokio::fs::write(&index_path, serde_json::to_string_pretty(&index).unwrap()).await + { 
return Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body("Failed to update index".into()) @@ -468,13 +403,15 @@ async fn handle_put_manifest(name: String, reference: String, headers: HeaderMap .header("Docker-Distribution-Api-Version", "registry/2.0") .body(Body::empty()) .unwrap() - } async fn handle_post_blob_upload(path: String) -> Response { // Generate a simple unique ID for the upload session using timestamp and process id use std::time::{SystemTime, UNIX_EPOCH}; - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos(); let pid = std::process::id(); let upload_id = format!("{}-{}", timestamp, pid); let location = format!("/v2/{}/{}", path.trim_end_matches('/'), upload_id); @@ -489,7 +426,12 @@ async fn handle_post_blob_upload(path: String) -> Response { .unwrap() } -async fn handle_put_blob(upload_id: String, headers: HeaderMap, query_string: String, request: Request) -> Response { +async fn handle_put_blob( + upload_id: String, + headers: HeaderMap, + query_string: String, + request: Request, +) -> Response { let root = PathBuf::from( std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), ); @@ -551,12 +493,21 @@ async fn handle_put_blob(upload_id: String, headers: HeaderMap, query_string: St if digest_str != actual_digest { return Response::builder() .status(StatusCode::BAD_REQUEST) - .body(format!("Digest mismatch: expected {} got {}", digest_str, actual_digest).into()) + .body( + format!( + "Digest mismatch: expected {} got {}", + digest_str, actual_digest + ) + .into(), + ) .unwrap(); } // Write blob - let blob_path = root.join("blobs").join("sha256").join(digest_str.strip_prefix("sha256:").unwrap()); + let blob_path = root + .join("blobs") + .join("sha256") + .join(digest_str.strip_prefix("sha256:").unwrap()); if let Err(_) = 
tokio::fs::create_dir_all(blob_path.parent().unwrap()).await { return Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) @@ -619,20 +570,28 @@ async fn handle_head_manifest(name: String, reference: String) -> Response { for manifest in json["manifests"].as_array().unwrap_or(&vec![]) { if let Some(digest) = manifest["digest"].as_str() { if digest == format!("sha256:{}", sha_manifest) { - media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + media_type = manifest["mediaType"] + .as_str() + .unwrap_or(OCI_MIME_TYPE) + .to_string(); break; } } } } else { for manifest in json["manifests"].as_array().unwrap_or(&vec![]) { - if let Some(image_base_name) = manifest["annotations"]["org.opencontainers.image.base.name"].as_str() { + if let Some(image_base_name) = + manifest["annotations"]["org.opencontainers.image.base.name"].as_str() + { let requested_reference = format!("{}:{}", name, reference); if requested_reference == image_base_name { if let Some(digest) = manifest["digest"].as_str() { sha_manifest = digest.strip_prefix("sha256:").unwrap_or(digest).to_owned(); } - media_type = manifest["mediaType"].as_str().unwrap_or(OCI_MIME_TYPE).to_string(); + media_type = manifest["mediaType"] + .as_str() + .unwrap_or(OCI_MIME_TYPE) + .to_string(); break; } } @@ -667,7 +626,10 @@ async fn handle_head_blob(digest: String) -> Response { let root = PathBuf::from( std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), ); - let blob_path = root.join("blobs").join("sha256").join(digest.strip_prefix("sha256:").unwrap()); + let blob_path = root + .join("blobs") + .join("sha256") + .join(digest.strip_prefix("sha256:").unwrap()); match fs::metadata(&blob_path) { Ok(metadata) => Response::builder() @@ -732,17 +694,7 @@ async fn handle_patch_blob(upload_id: String, request: Request) -> Response { async fn main() { let args: Vec = env::args().collect(); - if args.len() < 2 { - println!("Usage: {} [bind_address]", 
args[0]); - return; - } - - println!("unpacking: {}", args[1]); - let payload_sha = &args[1]; - - let bind_addr = args.get(2).map(|s| s.as_str()).unwrap_or("0.0.0.0:5000"); - - unpack(payload_sha); + let bind_addr = args.get(1).map(|s| s.as_str()).unwrap_or("0.0.0.0:5000"); let listener = tokio::net::TcpListener::bind(bind_addr).await.unwrap(); println!("listening on {}", listener.local_addr().unwrap()); @@ -756,14 +708,13 @@ mod test { use flate2::{Compression, write::GzEncoder}; use futures_util::{TryStreamExt, future::ready}; use regex_lite::Regex; - use sha2::{Digest, Sha256}; use std::{ fs::File, - io::{BufRead, BufReader, Cursor, Seek, Write}, + io::{Cursor, Seek, Write}, path::{Path, PathBuf}, }; - use crate::{start_seed_registry, unpack}; + use crate::start_seed_registry; struct EnvGuard { key: String, @@ -811,9 +762,6 @@ mod test { } const TEST_IMAGE: &str = "ghcr.io/zarf-dev/doom-game:0.0.1"; - // Split gzip into 1024 * 768 kb chunks - const CHUNK_SIZE: usize = 1024 * 768; - const ZARF_PAYLOAD_PREFIX: &str = "zarf-payload"; // Based on upstream rust-oci-client regex: // https://github.com/oras-project/rust-oci-client/blob/657c1caf9e99ce2184a96aa319fde4f4a8c09439/src/regexp.rs#L3-L5 const REFERENCE_REGEXP: &str = r"^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?/)?[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\w][\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$"; @@ -830,13 +778,8 @@ mod test { .await .expect("should have setup the test environment"); - let output_root = env.output_dir(); - let _init_guard = EnvGuard::new( - "ZARF_INJECTOR_INIT_ROOT", - &env.input_dir().to_string_lossy(), - ); + let output_root = env.seed_dir(); let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); - unpack(&env.shasum()); // Assert the files and 
directory we expect to exist do exist assert!(Path::new(&output_root.join("index.json")).exists()); @@ -904,13 +847,8 @@ mod test { .await .expect("should have setup the test environment"); - let output_root = env.output_dir(); - let _init_guard = EnvGuard::new( - "ZARF_INJECTOR_INIT_ROOT", - &env.input_dir().to_string_lossy(), - ); + let output_root = env.seed_dir(); let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); - unpack(&env.shasum()); localize_test_image(TEST_IMAGE, &output_root) .expect("should have localized the test image's index.json"); @@ -985,7 +923,11 @@ mod test { if let Err(ref e) = push_result { eprintln!("Push error: {:?}", e); } - assert!(push_result.is_ok(), "should have pushed image to registry: {:?}", push_result); + assert!( + push_result.is_ok(), + "should have pushed image to registry: {:?}", + push_result + ); // Verify we can pull it back with the new tag docker @@ -1007,7 +949,11 @@ mod test { if let Err(ref e) = verify_pull { eprintln!("Pull back error: {:?}", e); } - assert!(verify_pull.is_ok(), "should have pulled pushed image back: {:?}", verify_pull); + assert!( + verify_pull.is_ok(), + "should have pulled pushed image back: {:?}", + verify_pull + ); // Cleanup docker @@ -1113,100 +1059,50 @@ mod test { } struct TestEnv { - digest: String, - input_dir: PathBuf, - output_dir: PathBuf, + seed_dir: PathBuf, } impl TestEnv { async fn new(client: Docker, image: &str, root: &Path) -> Result { - // Ensure we have test directories set up - let input_dir = root.join("zarf-init"); - let output_dir = root.join("zarf-seed"); - std::fs::create_dir(&input_dir).context("should have created test input directory")?; - std::fs::create_dir(&output_dir) - .context("should have created test output directory")?; + // Ensure we have test directory set up + let seed_dir = root.join("zarf-seed"); + std::fs::create_dir(&seed_dir) + .context("should have created test seed directory")?; // Download test image 
Self::ensure_image_exists_locally(&client, image) .await .context("should have pulled down the test image")?; - // Export test image from docker into a stream to iterate over + // Export test image from docker as a tarball let image_stream = client.export_image(image).map_err(anyhow::Error::msg); - // Create an in-memory seekable buffer reading in the image and - // for iteration later when creating the zarf-payload-* chunks + // Collect the tarball into memory let buffer = Cursor::new(Vec::new()); - - // Encode test image as gzip into the buffer let mut gz = GzEncoder::new(buffer, Compression::default()); image_stream .try_for_each(|data| { - // We map the error to make sure we're propagating the - // same type of error across the board let res = gz.write_all(&data).map_err(anyhow::Error::msg); - // Ready needs to be called for the stream to do its thing ready(res) }) .await?; - let mut buffer = gz + let buffer = gz .finish() - .context("should have finished reading from stream")?; - - // Rewind to the beginning of the now gzip encoded contents image, - // so that it can be iterated over to create zarf-payload-* chunks - buffer - .rewind() - .context("should have rewound buffer for reading")?; - let mut reader = BufReader::with_capacity(CHUNK_SIZE, buffer); - - let mut hasher = Sha256::new(); - let mut chunk_id = 0; - while let std::result::Result::Ok(chunk) = reader.fill_buf() { - let read_bytes = chunk.len(); - if read_bytes == 0 { - break; - } + .context("should have finished encoding image")?; - hasher.update(chunk); - - // Write chunks to disk as zarf-payload-00X in temp dir - let mut chunk_file = File::create( - input_dir.join(format!("{}-{:0>3}", ZARF_PAYLOAD_PREFIX, chunk_id)), - ) - .context("should have created chunk file")?; - chunk_file - .write_all(chunk) - .context("should have written chunk to file")?; - chunk_file - .flush() - .context("should have flushed chunk file")?; - chunk_id += 1; - - reader.consume(read_bytes); - } - let hash = 
hasher.finalize(); - let digest = format!("{hash:x}"); - - Ok(Self { - digest, - input_dir, - output_dir, - }) - } - - fn shasum(&self) -> String { - self.digest.to_owned() - } + // Extract the tarball directly to the seed directory + let tar = flate2::read::GzDecoder::new(&buffer.get_ref()[..]); + let mut archive = tar::Archive::new(tar); + archive + .unpack(&seed_dir) + .context("should have unpacked image to seed directory")?; - fn input_dir(&self) -> PathBuf { - self.input_dir.to_owned() + Ok(Self { seed_dir }) } - fn output_dir(&self) -> PathBuf { - self.output_dir.to_owned() + fn seed_dir(&self) -> PathBuf { + self.seed_dir.to_owned() } async fn ensure_image_exists_locally(client: &Docker, image: &str) -> Result<()> { From 73d228a2c184ba34810a7d34f9bacc91c2d07292 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Wed, 1 Oct 2025 11:34:17 -0400 Subject: [PATCH 09/26] reset main Signed-off-by: Austin Abro --- .github/dependabot.yaml | 10 +++++----- README.md | 4 ---- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index c130433..1557067 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -3,8 +3,8 @@ updates: - package-ecosystem: github-actions directory: / schedule: - interval: weekly - groups: - actions-organization: - patterns: - - "actions/*" \ No newline at end of file + interval: daily + - package-ecosystem: cargo + directory: / + schedule: + interval: daily diff --git a/README.md b/README.md index 4ebe1da..1ade677 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,3 @@ Build your injector by following the steps above then run the following the `tes zarf package create zarf init --confirm ``` - -## Automatic dependency updates - -Dependabot automatically creates pull requests to the zarf-injector repository if there is a security vulnerability in one of our dependencies. Aside from these updates, this repo does not have a system to automatically update cargo dependencies. 
The injector is small and simple and therefore unlikely to see a practical improvement from a dependency update. Additionally, consistently updating the dependencies could inch us closer to the 1MiB limit. Ensuring we have some leeway from the limit makes it easier to update the injector in the event of a security vulnerability or additional required feature. \ No newline at end of file From e9ea38e2dff1bcfa5180a168d2bb9a9e7f8fbc00 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 10:19:57 -0400 Subject: [PATCH 10/26] run tests serially Signed-off-by: Austin Abro --- Cargo.lock | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 1 + Makefile | 2 +- src/main.rs | 8 +++- 4 files changed, 140 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d0ec89..502955c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -334,7 +334,7 @@ checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.4.1", "windows-sys", ] @@ -363,13 +363,29 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -378,6 +394,23 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + [[package]] name = "futures-macro" version = "0.3.31" @@ -391,9 +424,9 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" @@ -407,9 +440,13 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ + "futures-channel", "futures-core", + "futures-io", "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", "slab", @@ -758,6 +795,16 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.21" @@ -842,6 +889,29 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +[[package]] 
+name = "parking_lot" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall 0.5.17", + "smallvec", + "windows-targets", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -922,6 +992,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "ref-cast" version = "1.0.24" @@ -979,6 +1058,15 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + [[package]] name = "schemars" version = "0.9.0" @@ -1003,6 +1091,18 @@ dependencies = [ "serde_json", ] +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + [[package]] name = "serde" version = "1.0.198" @@ -1087,6 +1187,31 @@ dependencies = [ "time", ] +[[package]] +name = 
"serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sha2" version = "0.10.8" @@ -1642,6 +1767,7 @@ dependencies = [ "hex", "regex-lite", "serde_json", + "serial_test", "sha2", "tar", "tokio", diff --git a/Cargo.toml b/Cargo.toml index c6d646c..6faceea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,7 @@ bollard = "0.17.1" flate2 = "1.1.2" futures-util = "0.3.30" tar = "0.4.40" +serial_test = "3.2.0" [profile.release] opt-level = "z" # Optimize for size. diff --git a/Makefile b/Makefile index 564204c..edc86ec 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ check-size: injector ## Validate that both injector binaries are under 1 MiB unit-test: ## Run cargo tests - cargo test -- --test-threads=1 + cargo test -- --no-capture target/x86_64-unknown-linux-musl/release/zarf-injector: src/main.rs Cargo.toml cross build --target x86_64-unknown-linux-musl --release diff --git a/src/main.rs b/src/main.rs index 901ec05..4ccfcd0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -116,7 +116,6 @@ async fn handle_get_manifest(name: String, reference: String) -> Response { .strip_prefix("sha256:") .unwrap() .to_owned(); - break; } } } @@ -320,6 +319,10 @@ async fn handle_put_manifest( // Verify digest if provided if let Some(expected_digest) = headers.get("Docker-Content-Digest") { + // println!( + // "Docker-content digest {}", + // expected_digest.to_str().unwrap() + // ); if expected_digest.to_str().unwrap() != digest_str { return Response::builder() 
.status(StatusCode::BAD_REQUEST) @@ -714,6 +717,7 @@ mod test { use flate2::{Compression, write::GzEncoder}; use futures_util::{TryStreamExt, future::ready}; use regex_lite::Regex; + use serial_test::serial; use std::{ fs::File, io::{Cursor, Seek, Write}, @@ -774,6 +778,7 @@ mod test { const REFERENCE_REGEXP: &str = r"^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?/)?[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\w][\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$"; #[tokio::test] + #[serial] async fn test_integration() { let media_types = [OCI_MIME_TYPE, DOCKER_MEDIA_TYPE]; for media_type in media_types { @@ -854,6 +859,7 @@ mod test { } #[tokio::test] + #[serial] async fn test_push_integration() { let docker = Docker::connect_with_socket_defaults() .expect("should have been able to create a Docker client"); From 068286821016a23020b5d1060586e1fc3ac9a232 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 10:26:14 -0400 Subject: [PATCH 11/26] add sha test Signed-off-by: Austin Abro --- src/main.rs | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/src/main.rs b/src/main.rs index 4ccfcd0..6e9388c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -861,6 +861,11 @@ mod test { #[tokio::test] #[serial] async fn test_push_integration() { + test_push_with_tag().await; + test_push_with_sha().await; + } + + async fn test_push_with_tag() { let docker = Docker::connect_with_socket_defaults() .expect("should have been able to create a Docker client"); @@ -990,6 +995,107 @@ mod test { .expect("should have cleaned up pushed image"); } + async fn test_push_with_sha() { + let docker = Docker::connect_with_socket_defaults() + .expect("should have been able to create a Docker client"); + + // Create a temporary directory that will 
auto-cleanup on drop + let tmpdir = TempDir::new().expect("should have created temporary directory"); + + let env = TestEnv::new(docker.clone(), TEST_IMAGE, tmpdir.path()) + .await + .expect("should have setup the test environment"); + + let output_root = env.seed_dir(); + let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); + + localize_test_image(TEST_IMAGE, &output_root) + .expect("should have localized the test image's index.json"); + + // Use :0 to let the operating system decide the random port to listen on + let listener = tokio::net::TcpListener::bind("127.0.0.1:0") + .await + .expect("should have been able to bind listener to a random port on localhost"); + let random_port = listener + .local_addr() + .expect("should have been able to resolve the address") + .port(); + + // Start registry in the background + tokio::spawn(async { + let app = start_seed_registry(); + axum::serve(listener, app) + .await + .expect("should have been able to start serving the registry"); + }); + + // Wait for registry to be ready + for _ in 0..10 { + if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) + .await + .is_ok() + { + break; + } + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + } + + let test_image = TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{random_port}")); + + // Pull the original image first + let pull_options = Some(CreateImageOptions { + from_image: test_image.clone(), + ..Default::default() + }); + docker + .create_image(pull_options, None, None) + .try_collect::>() + .await + .expect("should have pulled test image"); + + // Read the index to find the actual manifest digest + let index_path = output_root.join("index.json"); + let index_content = tokio::fs::read_to_string(&index_path) + .await + .expect("should read index.json"); + let index_json: serde_json::Value = serde_json::from_str(&index_content) + .expect("should parse index.json"); + let manifest_digest = 
index_json["manifests"][0]["digest"] + .as_str() + .expect("should have digest") + .to_string(); + + let pushed_image_by_digest = format!("127.0.0.1:{random_port}/zarf-dev/doom-game@{}", manifest_digest); + + // Verify we can pull the image using its digest + let verify_pull = docker + .create_image( + Some(CreateImageOptions { + from_image: pushed_image_by_digest.clone(), + ..Default::default() + }), + None, + None, + ) + .try_collect::>() + .await; + if let Err(ref e) = verify_pull { + eprintln!("Pull with SHA error: {:?}", e); + } + assert!( + verify_pull.is_ok(), + "should have pulled image with SHA: {:?}", + verify_pull + ); + + // Cleanup + docker + .remove_image(&test_image, None, None) + .await + .expect("should have cleaned up test image"); + let _ = docker.remove_image(&pushed_image_by_digest, None, None).await; + } + // This localizes the test image's index.json such that the registry server // will be able to match the test image from it fn localize_test_image(image_reference: &str, image_root: &Path) -> Result<()> { From 67ce256179c42f441c5ac6d57a4ac7ce23cc3f63 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 10:46:33 -0400 Subject: [PATCH 12/26] simplify test Signed-off-by: Austin Abro --- src/main.rs | 216 +++++++++++++++++++++++----------------------------- 1 file changed, 95 insertions(+), 121 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6e9388c..4598902 100644 --- a/src/main.rs +++ b/src/main.rs @@ -858,89 +858,111 @@ mod test { .expect("should have cleaned up the pulled test image"); } - #[tokio::test] - #[serial] - async fn test_push_integration() { - test_push_with_tag().await; - test_push_with_sha().await; + struct TestRegistry { + random_port: u16, + output_root: PathBuf, + _seed_guard: EnvGuard, + _tmpdir: TempDir, } - async fn test_push_with_tag() { - let docker = Docker::connect_with_socket_defaults() - .expect("should have been able to create a Docker client"); - - // Create a temporary directory that will 
auto-cleanup on drop - let tmpdir = TempDir::new().expect("should have created temporary directory"); + impl TestRegistry { + async fn new(image: &str) -> Self { + let tmpdir = TempDir::new().expect("should have created temporary directory"); - let env = TestEnv::new(docker.clone(), TEST_IMAGE, tmpdir.path()) - .await - .expect("should have setup the test environment"); + let docker = Docker::connect_with_socket_defaults() + .expect("should have been able to create a Docker client"); - let output_root = env.seed_dir(); - let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); + let env = TestEnv::new(docker.clone(), image, tmpdir.path()) + .await + .expect("should have setup the test environment"); - localize_test_image(TEST_IMAGE, &output_root) - .expect("should have localized the test image's index.json"); + let output_root = env.seed_dir(); + let _seed_guard = + EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); - // Use :0 to let the operating system decide the random port to listen on - let listener = tokio::net::TcpListener::bind("127.0.0.1:0") - .await - .expect("should have been able to bind listener to a random port on localhost"); - let random_port = listener - .local_addr() - .expect("should have been able to resolve the address") - .port(); + localize_test_image(image, &output_root) + .expect("should have localized the test image's index.json"); - // Start registry in the background - tokio::spawn(async { - let app = start_seed_registry(); - axum::serve(listener, app) + let listener = tokio::net::TcpListener::bind("127.0.0.1:0") .await - .expect("should have been able to start serving the registry"); - }); + .expect("should have been able to bind listener to a random port on localhost"); + let random_port = listener + .local_addr() + .expect("should have been able to resolve the address") + .port(); + + tokio::spawn(async { + let app = start_seed_registry(); + axum::serve(listener, app) + .await 
+ .expect("should have been able to start serving the registry"); + }); - // Wait for registry to be ready - for _ in 0..10 { - if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) - .await - .is_ok() - { - break; + for _ in 0..10 { + if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) + .await + .is_ok() + { + break; + } + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + } + + Self { + random_port, + output_root, + _seed_guard, + _tmpdir: tmpdir, } - tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; } + } - let test_image = TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{random_port}")); + #[tokio::test] + #[serial] + async fn test_push_integration() { + test_push_with_tag().await; + test_push_with_sha().await; + } - // Pull the original image first - let pull_options = Some(CreateImageOptions { - from_image: test_image.clone(), - ..Default::default() - }); + async fn test_push_with_tag() { + let registry = TestRegistry::new(TEST_IMAGE).await; + let docker = Docker::connect_with_socket_defaults() + .expect("should have been able to create a Docker client"); + + let test_image = + TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{}", registry.random_port)); docker - .create_image(pull_options, None, None) + .create_image( + Some(CreateImageOptions { + from_image: test_image.clone(), + ..Default::default() + }), + None, + None, + ) .try_collect::>() .await .expect("should have pulled test image"); - // Tag it with a new name for pushing - let pushed_image = format!("127.0.0.1:{random_port}/zarf-dev/doom-game:pushed-test"); + let pushed_image = format!( + "127.0.0.1:{}/zarf-dev/doom-game:pushed-test", + registry.random_port + ); docker .tag_image( &test_image, Some(bollard::image::TagImageOptions { - repo: format!("127.0.0.1:{random_port}/zarf-dev/doom-game"), + repo: format!("127.0.0.1:{}/zarf-dev/doom-game", registry.random_port), tag: "pushed-test".to_string(), }), ) .await 
.expect("should have tagged image"); - // Push the image to the registry use bollard::image::PushImageOptions; let push_result = docker .push_image( - &format!("127.0.0.1:{random_port}/zarf-dev/doom-game:pushed-test"), + &pushed_image, Some(PushImageOptions { tag: "pushed-test".to_string(), ..Default::default() @@ -949,16 +971,12 @@ mod test { ) .try_collect::>() .await; - if let Err(ref e) = push_result { - eprintln!("Push error: {:?}", e); - } assert!( push_result.is_ok(), "should have pushed image to registry: {:?}", push_result ); - // Verify we can pull it back with the new tag docker .remove_image(&pushed_image, None, None) .await @@ -975,16 +993,12 @@ mod test { ) .try_collect::>() .await; - if let Err(ref e) = verify_pull { - eprintln!("Pull back error: {:?}", e); - } assert!( verify_pull.is_ok(), "should have pulled pushed image back: {:?}", verify_pull ); - // Cleanup docker .remove_image(&test_image, None, None) .await @@ -996,78 +1010,40 @@ mod test { } async fn test_push_with_sha() { + let registry = TestRegistry::new(TEST_IMAGE).await; let docker = Docker::connect_with_socket_defaults() .expect("should have been able to create a Docker client"); - // Create a temporary directory that will auto-cleanup on drop - let tmpdir = TempDir::new().expect("should have created temporary directory"); - - let env = TestEnv::new(docker.clone(), TEST_IMAGE, tmpdir.path()) - .await - .expect("should have setup the test environment"); - - let output_root = env.seed_dir(); - let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); - - localize_test_image(TEST_IMAGE, &output_root) - .expect("should have localized the test image's index.json"); - - // Use :0 to let the operating system decide the random port to listen on - let listener = tokio::net::TcpListener::bind("127.0.0.1:0") - .await - .expect("should have been able to bind listener to a random port on localhost"); - let random_port = listener - .local_addr() - .expect("should 
have been able to resolve the address") - .port(); - - // Start registry in the background - tokio::spawn(async { - let app = start_seed_registry(); - axum::serve(listener, app) - .await - .expect("should have been able to start serving the registry"); - }); - - // Wait for registry to be ready - for _ in 0..10 { - if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) - .await - .is_ok() - { - break; - } - tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; - } - - let test_image = TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{random_port}")); - - // Pull the original image first - let pull_options = Some(CreateImageOptions { - from_image: test_image.clone(), - ..Default::default() - }); + let test_image = + TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{}", registry.random_port)); docker - .create_image(pull_options, None, None) + .create_image( + Some(CreateImageOptions { + from_image: test_image.clone(), + ..Default::default() + }), + None, + None, + ) .try_collect::>() .await .expect("should have pulled test image"); - // Read the index to find the actual manifest digest - let index_path = output_root.join("index.json"); - let index_content = tokio::fs::read_to_string(&index_path) + let index_content = tokio::fs::read_to_string(registry.output_root.join("index.json")) .await .expect("should read index.json"); - let index_json: serde_json::Value = serde_json::from_str(&index_content) - .expect("should parse index.json"); + let index_json: serde_json::Value = + serde_json::from_str(&index_content).expect("should parse index.json"); let manifest_digest = index_json["manifests"][0]["digest"] .as_str() .expect("should have digest") .to_string(); - let pushed_image_by_digest = format!("127.0.0.1:{random_port}/zarf-dev/doom-game@{}", manifest_digest); + let pushed_image_by_digest = format!( + "127.0.0.1:{}/zarf-dev/doom-game@{}", + registry.random_port, manifest_digest + ); - // Verify we can pull the image using its digest let 
verify_pull = docker .create_image( Some(CreateImageOptions { @@ -1079,21 +1055,19 @@ mod test { ) .try_collect::>() .await; - if let Err(ref e) = verify_pull { - eprintln!("Pull with SHA error: {:?}", e); - } assert!( verify_pull.is_ok(), "should have pulled image with SHA: {:?}", verify_pull ); - // Cleanup docker .remove_image(&test_image, None, None) .await .expect("should have cleaned up test image"); - let _ = docker.remove_image(&pushed_image_by_digest, None, None).await; + let _ = docker + .remove_image(&pushed_image_by_digest, None, None) + .await; } // This localizes the test image's index.json such that the registry server From 47b0e1257b9825ca117768619583990d802cb12d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 10:50:07 -0400 Subject: [PATCH 13/26] simplify registry Signed-off-by: Austin Abro --- src/main.rs | 72 ++++++++++++++--------------------------------------- 1 file changed, 18 insertions(+), 54 deletions(-) diff --git a/src/main.rs b/src/main.rs index 4598902..9bf8d58 100644 --- a/src/main.rs +++ b/src/main.rs @@ -782,73 +782,37 @@ mod test { async fn test_integration() { let media_types = [OCI_MIME_TYPE, DOCKER_MEDIA_TYPE]; for media_type in media_types { - test_registry("ghcr.io/zarf-dev/doom-game:0.0.1", media_type).await; + test_registry(TEST_IMAGE, media_type).await; } } async fn test_registry(image: &str, media_type: &str) { - let docker = Docker::connect_with_socket_defaults() - .expect("should have been able to create a Docker client"); - - // Create a temporary directory that will auto-cleanup on drop - let tmpdir = TempDir::new().expect("should have created temporary directory"); - - let env = TestEnv::new(docker.clone(), image, tmpdir.path()) - .await - .expect("should have setup the test environment"); - - let output_root = env.seed_dir(); - let _seed_guard = EnvGuard::new("ZARF_INJECTOR_SEED_ROOT", &output_root.to_string_lossy()); + let registry = TestRegistry::new(image).await; // Assert the files and directory 
 we expect to exist do exist - assert!(Path::new(&output_root.join("index.json")).exists()); - assert!(Path::new(&output_root.join("manifest.json")).exists()); - assert!(Path::new(&output_root.join("oci-layout")).exists()); - assert!(Path::new(&output_root.join("repositories")).exists()); - - localize_test_image(image, &output_root) - .expect("should have localized the test image's index.json"); + assert!(Path::new(&registry.output_root.join("index.json")).exists()); + assert!(Path::new(&registry.output_root.join("manifest.json")).exists()); + assert!(Path::new(&registry.output_root.join("oci-layout")).exists()); + assert!(Path::new(&registry.output_root.join("repositories")).exists()); - change_manifest_media_type(&output_root, media_type) + change_manifest_media_type(&registry.output_root, media_type) .expect("should have changed the mediaType of the manifest"); - // Use :0 to let the operating system decide the random port to listen on - let listener = tokio::net::TcpListener::bind("127.0.0.1:0") - .await - .expect("should have been able to bind listener to a random port on localhost"); - let random_port = listener - .local_addr() - .expect("should have been able to resolve the address") - .port(); - - // Start registry in the background - tokio::spawn(async { - let app = start_seed_registry(); - axum::serve(listener, app) - .await - .expect("should have been able to start serving the registry"); - }); - - // Wait for registry to be ready - for _ in 0..10 { - if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) - .await - .is_ok() - { - break; - } - tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; - } + let docker = Docker::connect_with_socket_defaults() + .expect("should have been able to create a Docker client"); let image_name = extract_name(image); - let test_image = &format!("127.0.0.1:{random_port}/{image_name}"); - let options = Some(CreateImageOptions { - from_image: test_image.clone(), - ..Default::default() - }); + let test_image = 
format!("127.0.0.1:{}/{}", registry.random_port, image_name); let test_image_pull = docker - .create_image(options, None, None) + .create_image( + Some(CreateImageOptions { + from_image: test_image.clone(), + ..Default::default() + }), + None, + None, + ) .try_collect::>() .await; assert!(test_image_pull.is_ok()); From 1384cb44979b04cee38b8da2f46e1e0bfd2789b6 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 11:31:13 -0400 Subject: [PATCH 14/26] get the zarf repo at a branch Signed-off-by: Austin Abro --- .github/workflows/test-init.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-init.yaml b/.github/workflows/test-init.yaml index c97d409..f51bc0b 100644 --- a/.github/workflows/test-init.yaml +++ b/.github/workflows/test-init.yaml @@ -51,8 +51,14 @@ jobs: name: injector-arm path: target/aarch64-unknown-linux-musl/release/ - - name: Install Zarf - uses: zarf-dev/setup-zarf@10e539efed02f75ec39eb8823e22a5c795f492ae # v1.0.1 + - name: Clone and build Zarf + run: | + git clone https://github.com/zarf-dev/zarf.git + cd zarf + git checkout use-push-injector + make + chmod +x build/zarf + sudo mv build/zarf /usr/local/bin/zarf - name: "Setup K3d" run: | From 29a8111480eea94c67db12b5362d11654d77e4af Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 11:43:59 -0400 Subject: [PATCH 15/26] fix e2e Signed-off-by: Austin Abro --- .github/workflows/test-init.yaml | 12 +++++++++--- src/main.rs | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-init.yaml b/.github/workflows/test-init.yaml index f51bc0b..3fe7c6d 100644 --- a/.github/workflows/test-init.yaml +++ b/.github/workflows/test-init.yaml @@ -56,9 +56,15 @@ jobs: git clone https://github.com/zarf-dev/zarf.git cd zarf git checkout use-push-injector - make - chmod +x build/zarf - sudo mv build/zarf /usr/local/bin/zarf + if [ "${{ runner.arch }}" = "ARM64" ]; then + make build-cli-linux-arm + chmod +x 
build/zarf-arm + sudo mv build/zarf-arm /usr/local/bin/zarf + else + make build-cli-linux-amd + chmod +x build/zarf + sudo mv build/zarf /usr/local/bin/zarf + fi - name: "Setup K3d" run: | diff --git a/src/main.rs b/src/main.rs index 9bf8d58..cbc2a3d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -457,7 +457,7 @@ async fn handle_put_blob( }; // Try to read from temporary file first (from PATCH), otherwise use request body - let temp_path = root.join(".uploads").join(&upload_id); + let temp_path: PathBuf = root.join(".uploads").join(&upload_id); let body_bytes = if temp_path.exists() { match tokio::fs::read(&temp_path).await { Ok(bytes) => bytes, From c880bf655b840321f7e888ec4ba9694634d93724 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 12:10:58 -0400 Subject: [PATCH 16/26] stop checking for Docker-Content-Digest header in put requests Signed-off-by: Austin Abro --- src/main.rs | 38 ++++++-------------------------------- 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/src/main.rs b/src/main.rs index cbc2a3d..a252d17 100644 --- a/src/main.rs +++ b/src/main.rs @@ -9,7 +9,7 @@ use axum::{ Router, body::Body, extract::{Path, Request}, - http::{HeaderMap, StatusCode}, + http::StatusCode, response::{IntoResponse, Response}, routing::get, }; @@ -212,7 +212,7 @@ async fn handle_get_digest(tag: String) -> Response { } } -async fn put_handler(Path(path): Path, headers: HeaderMap, request: Request) -> Response { +async fn put_handler(Path(path): Path, request: Request) -> Response { let query_string = request.uri().query().unwrap_or(""); println!("PUT request: {} query: {}", path, query_string); let manifest_re = Regex::new("(.+)/manifests/(.+)").unwrap(); @@ -222,11 +222,11 @@ async fn put_handler(Path(path): Path, headers: HeaderMap, request: Requ let caps = manifest_re.captures(&path).unwrap(); let name = caps.get(1).unwrap().as_str().to_string(); let reference = caps.get(2).unwrap().as_str().to_string(); - handle_put_manifest(name, 
reference, headers, request).await + handle_put_manifest(name, reference, request).await } else if blob_re.is_match(&path) { let caps = blob_re.captures(&path).unwrap(); let upload_id = caps.get(2).unwrap().as_str().to_string(); - handle_put_blob(upload_id, headers, query_string.to_string(), request).await + handle_put_blob(upload_id, query_string.to_string(), request).await } else { Response::builder() .status(StatusCode::NOT_FOUND) @@ -290,12 +290,7 @@ async fn patch_handler(Path(path): Path, request: Request) -> Response { } } -async fn handle_put_manifest( - name: String, - reference: String, - headers: HeaderMap, - request: Request, -) -> Response { +async fn handle_put_manifest(name: String, reference: String, request: Request) -> Response { let root = PathBuf::from( std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), ); @@ -317,20 +312,6 @@ async fn handle_put_manifest( let digest = hasher.finalize(); let digest_str = format!("sha256:{}", digest.encode_hex::()); - // Verify digest if provided - if let Some(expected_digest) = headers.get("Docker-Content-Digest") { - // println!( - // "Docker-content digest {}", - // expected_digest.to_str().unwrap() - // ); - if expected_digest.to_str().unwrap() != digest_str { - return Response::builder() - .status(StatusCode::BAD_REQUEST) - .body("Digest mismatch".into()) - .unwrap(); - } - } - // Write manifest to blobs let blob_path = root .join("blobs") @@ -435,12 +416,7 @@ async fn handle_post_blob_upload(path: String) -> Response { .unwrap() } -async fn handle_put_blob( - upload_id: String, - headers: HeaderMap, - query_string: String, - request: Request, -) -> Response { +async fn handle_put_blob(upload_id: String, query_string: String, request: Request) -> Response { let root = PathBuf::from( std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), ); @@ -477,8 +453,6 @@ async fn handle_put_blob( .unwrap_or(""); // Simple URL decode for the colon 
encoded.replace("%3A", ":").replace("%3a", ":") - } else if let Some(digest) = headers.get("Docker-Content-Digest") { - digest.to_str().unwrap().to_string() } else { // Calculate digest let mut hasher = Sha256::new(); From 9a0118c0ce253d744abbe33966e3417e093301b9 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 12:15:06 -0400 Subject: [PATCH 17/26] test names Signed-off-by: Austin Abro --- src/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index a252d17..bcfb2c1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -753,14 +753,14 @@ mod test { #[tokio::test] #[serial] - async fn test_integration() { + async fn test_pull_mt() { let media_types = [OCI_MIME_TYPE, DOCKER_MEDIA_TYPE]; for media_type in media_types { - test_registry(TEST_IMAGE, media_type).await; + test_pull(TEST_IMAGE, media_type).await; } } - async fn test_registry(image: &str, media_type: &str) { + async fn test_pull(image: &str, media_type: &str) { let registry = TestRegistry::new(image).await; // Assert the files and directory we expect to exist do exist @@ -857,7 +857,7 @@ mod test { #[tokio::test] #[serial] - async fn test_push_integration() { + async fn test_push() { test_push_with_tag().await; test_push_with_sha().await; } From 0a69de378a426b488d9215f29c04b92d9bc812ed Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 12:23:54 -0400 Subject: [PATCH 18/26] delete unnecessary id Signed-off-by: Austin Abro --- src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index bcfb2c1..384a078 100644 --- a/src/main.rs +++ b/src/main.rs @@ -409,7 +409,6 @@ async fn handle_post_blob_upload(path: String) -> Response { Response::builder() .status(StatusCode::ACCEPTED) .header("Location", location) - .header("Docker-Upload-UUID", upload_id) .header("Range", "0-0") .header("Docker-Distribution-Api-Version", "registry/2.0") .body(Body::empty()) From 17fdc88f27025d8962a2a18f33ad53918151b737 Mon Sep 
17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 13:37:55 -0400 Subject: [PATCH 19/26] fix range Signed-off-by: Austin Abro --- src/main.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 384a078..c2d9e5e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -409,7 +409,6 @@ async fn handle_post_blob_upload(path: String) -> Response { Response::builder() .status(StatusCode::ACCEPTED) .header("Location", location) - .header("Range", "0-0") .header("Docker-Distribution-Api-Version", "registry/2.0") .body(Body::empty()) .unwrap() @@ -662,11 +661,13 @@ async fn handle_patch_blob(upload_id: String, request: Request) -> Response { .unwrap(); } - let current_size = body_bytes.len(); + // Calculate the range: end_of_range is the position of the last byte (0-indexed) + // For example, if we uploaded 1000 bytes, positions are 0-999 + let end_of_range = body_bytes.len().saturating_sub(1); Response::builder() .status(StatusCode::ACCEPTED) - .header("Range", format!("0-{}", current_size)) + .header("Range", format!("0-{}", end_of_range)) .header("Docker-Distribution-Api-Version", "registry/2.0") .body(Body::empty()) .unwrap() From 9913ac1ec80b7142cb5a3168ca953929a4358ffc Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 13:52:33 -0400 Subject: [PATCH 20/26] test resumable uploads Signed-off-by: Austin Abro --- Cargo.lock | 464 ++++++++++++++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 1 + src/main.rs | 156 ++++++++++++++++-- 3 files changed, 594 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 502955c..7da17e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,9 +216,13 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.0.95" +version = "1.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = 
"e1354349954c6fc9cb0deab020f27f783cf0b604e8bb754dc4658ecf0d29c35f" +dependencies = [ + "find-msvc-tools", + "shlex", +] [[package]] name = "cfg-if" @@ -226,6 +230,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.41" @@ -323,7 +333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -335,9 +345,15 @@ dependencies = [ "cfg-if", "libc", "redox_syscall 0.4.1", - "windows-sys", + "windows-sys 0.52.0", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" + [[package]] name = "flate2" version = "1.1.2" @@ -462,6 +478,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "gimli" version = "0.28.1" @@ -573,6 +600,23 @@ dependencies = [ "winapi", ] +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.2", +] + [[package]] name = "hyper-util" 
version = "0.1.5" @@ -761,6 +805,12 @@ dependencies = [ "serde", ] +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + [[package]] name = "itoa" version = "1.0.11" @@ -856,7 +906,7 @@ dependencies = [ "hermit-abi", "libc", "wasi", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -909,7 +959,7 @@ dependencies = [ "libc", "redox_syscall 0.5.17", "smallvec", - "windows-targets", + "windows-targets 0.52.5", ] [[package]] @@ -965,6 +1015,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + [[package]] name = "proc-macro2" version = "1.0.81" @@ -974,6 +1033,55 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quinn" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +dependencies = [ + "bytes", + "rand", + "ring", + "rustc-hash", + "rustls", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" 
+dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "quote" version = "1.0.36" @@ -983,6 +1091,36 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -1027,12 +1165,74 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" +[[package]] +name = "reqwest" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +dependencies = [ + "base64", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 0.26.11", + "winreg", +] + +[[package]] +name = "ring" +version = 
"0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rustix" version = "0.38.34" @@ -1043,7 +1243,50 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.23.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] @@ -1223,6 +1466,12 @@ dependencies = [ "digest", ] +[[package]] +name = "shlex" +version = 
"1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "slab" version = "0.4.11" @@ -1242,7 +1491,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -1251,6 +1500,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "2.0.60" @@ -1357,6 +1612,21 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.43.1" @@ -1370,7 +1640,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -1384,6 +1654,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.10" @@ -1464,6 +1744,12 @@ version = "1.0.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" version = "2.5.4" @@ -1528,6 +1814,19 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.100" @@ -1560,6 +1859,34 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.2", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -1641,13 +1968,37 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" 
version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -1656,28 +2007,46 @@ version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = 
"windows_aarch64_msvc" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.5" @@ -1690,30 +2059,64 @@ version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "writeable" version = "0.6.1" @@ -1766,6 +2169,7 @@ dependencies = [ "futures-util", "hex", "regex-lite", + "reqwest", "serde_json", "serial_test", "sha2", @@ -1774,6 +2178,26 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "zerofrom" version = "0.1.6" @@ -1795,6 +2219,12 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + [[package]] name = "zerotrie" version = "0.2.2" diff --git a/Cargo.toml b/Cargo.toml index 6faceea..87fc030 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ flate2 = "1.1.2" futures-util = "0.3.30" tar = "0.4.40" serial_test = "3.2.0" +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } [profile.release] opt-level = "z" # 
Optimize for size. diff --git a/src/main.rs b/src/main.rs index c2d9e5e..46f49f4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -279,8 +279,9 @@ async fn patch_handler(Path(path): Path, request: Request) -> Response { if blob_re.is_match(&path) { let caps = blob_re.captures(&path).unwrap(); + let name = caps.get(1).unwrap().as_str().to_string(); let upload_id = caps.get(2).unwrap().as_str().to_string(); - handle_patch_blob(upload_id, request).await + handle_patch_blob(name, upload_id, request).await } else { Response::builder() .status(StatusCode::NOT_FOUND) @@ -628,11 +629,18 @@ async fn handle_head_blob(digest: String) -> Response { } } -async fn handle_patch_blob(upload_id: String, request: Request) -> Response { +async fn handle_patch_blob(name: String, upload_id: String, request: Request) -> Response { let root = PathBuf::from( std::env::var("ZARF_INJECTOR_SEED_ROOT").unwrap_or_else(|_| String::from("/zarf-seed")), ); + // Get Content-Range header to validate upload order + let content_range = request + .headers() + .get("Content-Range") + .and_then(|h| h.to_str().ok()) + .map(|s| s.to_string()); + // Read the body let body_bytes = match axum::body::to_bytes(request.into_body(), usize::MAX).await { Ok(bytes) => bytes, @@ -654,19 +662,84 @@ async fn handle_patch_blob(upload_id: String, request: Request) -> Response { } let temp_path = temp_dir.join(&upload_id); - if let Err(_) = tokio::fs::write(&temp_path, &body_bytes).await { - return Response::builder() - .status(StatusCode::INTERNAL_SERVER_ERROR) - .body("Failed to write temp file".into()) - .unwrap(); + + // Get the current size of existing data + let existing_size = if temp_path.exists() { + match tokio::fs::metadata(&temp_path).await { + Ok(meta) => meta.len() as usize, + Err(_) => 0, + } + } else { + 0 + }; + + // Validate Content-Range if provided + if let Some(range) = content_range { + // Parse Content-Range header (Example: "0-1000") + let range_re = Regex::new(r"^(\d+)-(\d+)$").unwrap(); + if let 
Some(caps) = range_re.captures(&range) { + let start: usize = caps.get(1).unwrap().as_str().parse().unwrap_or(0); + let end: usize = caps.get(2).unwrap().as_str().parse().unwrap_or(0); + + // Validate that start matches existing_size + if start != existing_size { + return Response::builder() + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .header("Range", format!("0-{}", existing_size.saturating_sub(1))) + .body("Chunk out of order".into()) + .unwrap(); + } + + // Validate that the chunk size matches end - start + 1 + let expected_size = end - start + 1; + if body_bytes.len() != expected_size { + return Response::builder() + .status(StatusCode::BAD_REQUEST) + .body("Content-Length does not match Content-Range".into()) + .unwrap(); + } + } + } + + // Append data to the temporary file + if existing_size > 0 { + // Read existing data, append new data, and write back + match tokio::fs::read(&temp_path).await { + Ok(mut existing_data) => { + existing_data.extend_from_slice(&body_bytes); + if let Err(_) = tokio::fs::write(&temp_path, &existing_data).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to write temp file".into()) + .unwrap(); + } + } + Err(_) => { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to read existing temp file".into()) + .unwrap(); + } + } + } else { + // No existing data, just write the new data + if let Err(_) = tokio::fs::write(&temp_path, &body_bytes).await { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body("Failed to write temp file".into()) + .unwrap(); + } } - // Calculate the range: end_of_range is the position of the last byte (0-indexed) - // For example, if we uploaded 1000 bytes, positions are 0-999 - let end_of_range = body_bytes.len().saturating_sub(1); + // Calculate new total size (end-of-range is the position of the last byte) + let new_total_size = existing_size + body_bytes.len(); + let end_of_range = 
new_total_size.saturating_sub(1); + + let location = format!("/v2/{}/blobs/uploads/{}", name, upload_id); Response::builder() .status(StatusCode::ACCEPTED) + .header("Location", location) .header("Range", format!("0-{}", end_of_range)) .header("Docker-Distribution-Api-Version", "registry/2.0") .body(Body::empty()) @@ -1008,6 +1081,69 @@ mod test { .await; } + #[tokio::test] + #[serial] + async fn test_multi_chunk_upload() { + use sha2::{Digest, Sha256}; + + let registry = TestRegistry::new(TEST_IMAGE).await; + let client = reqwest::Client::new(); + let base_url = format!("http://127.0.0.1:{}", registry.random_port); + + // Create test data (1MB) + let chunk1 = vec![1u8; 512 * 1024]; + let chunk2 = vec![2u8; 512 * 1024]; + let all_data = [chunk1.clone(), chunk2.clone()].concat(); + + // Calculate digest + let mut hasher = Sha256::new(); + hasher.update(&all_data); + let digest = format!("sha256:{}", hex::encode(hasher.finalize())); + + // POST to start upload + let resp = client + .post(&format!("{}/v2/test/blobs/uploads/", base_url)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), 202); + let location = resp.headers().get("Location").unwrap().to_str().unwrap(); + + // PATCH chunk 1 + let resp = client + .patch(&format!("{}{}", base_url, location)) + .header("Content-Range", "0-524287") + .header("Content-Length", chunk1.len()) + .body(chunk1) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), 202); + assert_eq!(resp.headers().get("Range").unwrap(), "0-524287"); + let location = resp.headers().get("Location").unwrap().to_str().unwrap(); + + // PATCH chunk 2 + let resp = client + .patch(&format!("{}{}", base_url, location)) + .header("Content-Range", "524288-1048575") + .header("Content-Length", chunk2.len()) + .body(chunk2) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), 202); + assert_eq!(resp.headers().get("Range").unwrap(), "0-1048575"); + let location = resp.headers().get("Location").unwrap().to_str().unwrap(); + + // PUT to 
close + let resp = client + .put(&format!("{}{}?digest={}", base_url, location, digest)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), 201); + } + // This localizes the test image's index.json such that the registry server // will be able to match the test image from it fn localize_test_image(image_reference: &str, image_root: &Path) -> Result<()> { From b6ba6c9cdc66f6dcc0d7766134441f97ccc02bd7 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 13:59:33 -0400 Subject: [PATCH 21/26] fix tests Signed-off-by: Austin Abro --- src/main.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/main.rs b/src/main.rs index 46f49f4..109a439 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1090,9 +1090,10 @@ mod test { let client = reqwest::Client::new(); let base_url = format!("http://127.0.0.1:{}", registry.random_port); + let chunk_size = 512 * 1024; // Create test data (1MB) - let chunk1 = vec![1u8; 512 * 1024]; - let chunk2 = vec![2u8; 512 * 1024]; + let chunk1 = vec![1u8; chunk_size]; + let chunk2 = vec![2u8; chunk_size]; let all_data = [chunk1.clone(), chunk2.clone()].concat(); // Calculate digest @@ -1112,27 +1113,27 @@ mod test { // PATCH chunk 1 let resp = client .patch(&format!("{}{}", base_url, location)) - .header("Content-Range", "0-524287") + .header("Content-Range", format!("0-{}", chunk_size - 1)) .header("Content-Length", chunk1.len()) .body(chunk1) .send() .await .unwrap(); assert_eq!(resp.status(), 202); - assert_eq!(resp.headers().get("Range").unwrap(), "0-524287"); + assert_eq!(resp.headers().get("Range").unwrap(), &format!("0-{}", chunk_size - 1)); let location = resp.headers().get("Location").unwrap().to_str().unwrap(); // PATCH chunk 2 let resp = client .patch(&format!("{}{}", base_url, location)) - .header("Content-Range", "524288-1048575") + .header("Content-Range", format!("{}-{}", chunk_size, 2 * chunk_size - 1)) .header("Content-Length", chunk2.len()) .body(chunk2) .send() .await .unwrap(); 
assert_eq!(resp.status(), 202); - assert_eq!(resp.headers().get("Range").unwrap(), "0-1048575"); + assert_eq!(resp.headers().get("Range").unwrap(), &format!("0-{}", 2 * chunk_size - 1)); let location = resp.headers().get("Location").unwrap().to_str().unwrap(); // PUT to close From 09204082804448e1adf20a0f8162c2d16271029d Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 14:08:34 -0400 Subject: [PATCH 22/26] rename Signed-off-by: Austin Abro --- src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 109a439..d065a05 100644 --- a/src/main.rs +++ b/src/main.rs @@ -34,7 +34,7 @@ fn start_seed_registry() -> Router { Router::new() .route( "/v2/*path", - get(handler) + get(get_handler) .put(put_handler) .head(head_handler) .post(post_handler) @@ -66,7 +66,7 @@ fn start_seed_registry() -> Router { ) } -async fn handler(Path(path): Path) -> Response { +async fn get_handler(Path(path): Path) -> Response { println!("request: {}", path); let path = &path; let manifest = Regex::new("(.+)/manifests/(.+)").unwrap(); From d686e9f79539e6ba04a4a4a8c309b972f03f3763 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 14:10:27 -0400 Subject: [PATCH 23/26] format Signed-off-by: Austin Abro --- src/main.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index d065a05..2b58759 100644 --- a/src/main.rs +++ b/src/main.rs @@ -263,7 +263,7 @@ async fn head_handler(Path(path): Path) -> Response { handle_head_manifest(name, reference).await } else if blob_re.is_match(&path) { let caps = blob_re.captures(&path).unwrap(); - let digest = caps.get(1).unwrap().as_str().to_string(); + let digest: String = caps.get(1).unwrap().as_str().to_string(); handle_head_blob(digest).await } else { Response::builder() @@ -1120,20 +1120,29 @@ mod test { .await .unwrap(); assert_eq!(resp.status(), 202); - assert_eq!(resp.headers().get("Range").unwrap(), 
&format!("0-{}", chunk_size - 1)); + assert_eq!( + resp.headers().get("Range").unwrap(), + &format!("0-{}", chunk_size - 1) + ); let location = resp.headers().get("Location").unwrap().to_str().unwrap(); // PATCH chunk 2 let resp = client .patch(&format!("{}{}", base_url, location)) - .header("Content-Range", format!("{}-{}", chunk_size, 2 * chunk_size - 1)) + .header( + "Content-Range", + format!("{}-{}", chunk_size, 2 * chunk_size - 1), + ) .header("Content-Length", chunk2.len()) .body(chunk2) .send() .await .unwrap(); assert_eq!(resp.status(), 202); - assert_eq!(resp.headers().get("Range").unwrap(), &format!("0-{}", 2 * chunk_size - 1)); + assert_eq!( + resp.headers().get("Range").unwrap(), + &format!("0-{}", 2 * chunk_size - 1) + ); let location = resp.headers().get("Location").unwrap().to_str().unwrap(); // PUT to close From ded50fccb8113214afbe50de1168acb52fe33047 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 14:21:06 -0400 Subject: [PATCH 24/26] test names Signed-off-by: Austin Abro --- src/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 2b58759..0797674 100644 --- a/src/main.rs +++ b/src/main.rs @@ -826,14 +826,14 @@ mod test { #[tokio::test] #[serial] - async fn test_pull_mt() { + async fn test_pull() { let media_types = [OCI_MIME_TYPE, DOCKER_MEDIA_TYPE]; for media_type in media_types { - test_pull(TEST_IMAGE, media_type).await; + test_pull_mt(TEST_IMAGE, media_type).await; } } - async fn test_pull(image: &str, media_type: &str) { + async fn test_pull_mt(image: &str, media_type: &str) { let registry = TestRegistry::new(image).await; // Assert the files and directory we expect to exist do exist From fd0cab6e89d8045cf4e6222f64feb2e60d55e123 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 15:33:14 -0400 Subject: [PATCH 25/26] improve tests Signed-off-by: Austin Abro --- src/main.rs | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 
deletions(-) diff --git a/src/main.rs b/src/main.rs index 0797674..075bf33 100644 --- a/src/main.rs +++ b/src/main.rs @@ -849,7 +849,7 @@ mod test { .expect("should have been able to create a Docker client"); let image_name = extract_name(image); - let test_image = format!("127.0.0.1:{}/{}", registry.random_port, image_name); + let test_image = format!("{}/{}", registry.address, image_name); let test_image_pull = docker .create_image( @@ -870,13 +870,14 @@ mod test { } struct TestRegistry { - random_port: u16, + address: String, output_root: PathBuf, _seed_guard: EnvGuard, _tmpdir: TempDir, } impl TestRegistry { + // This setups up a registry with a test image packed into it async fn new(image: &str) -> Self { let tmpdir = TempDir::new().expect("should have created temporary directory"); @@ -897,10 +898,10 @@ mod test { let listener = tokio::net::TcpListener::bind("127.0.0.1:0") .await .expect("should have been able to bind listener to a random port on localhost"); - let random_port = listener + let address = listener .local_addr() .expect("should have been able to resolve the address") - .port(); + .to_string(); tokio::spawn(async { let app = start_seed_registry(); @@ -910,7 +911,7 @@ mod test { }); for _ in 0..10 { - if tokio::net::TcpStream::connect(format!("127.0.0.1:{}", random_port)) + if tokio::net::TcpStream::connect(&address) .await .is_ok() { @@ -920,7 +921,7 @@ mod test { } Self { - random_port, + address, output_root, _seed_guard, _tmpdir: tmpdir, @@ -940,8 +941,7 @@ mod test { let docker = Docker::connect_with_socket_defaults() .expect("should have been able to create a Docker client"); - let test_image = - TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{}", registry.random_port)); + let test_image = TEST_IMAGE.replace("ghcr.io", ®istry.address); docker .create_image( Some(CreateImageOptions { @@ -955,15 +955,12 @@ mod test { .await .expect("should have pulled test image"); - let pushed_image = format!( - 
"127.0.0.1:{}/zarf-dev/doom-game:pushed-test", - registry.random_port - ); + let pushed_image = format!("{}/zarf-dev/doom-game:pushed-test", registry.address); docker .tag_image( &test_image, Some(bollard::image::TagImageOptions { - repo: format!("127.0.0.1:{}/zarf-dev/doom-game", registry.random_port), + repo: format!("{}/zarf-dev/doom-game", registry.address), tag: "pushed-test".to_string(), }), ) @@ -1025,8 +1022,7 @@ mod test { let docker = Docker::connect_with_socket_defaults() .expect("should have been able to create a Docker client"); - let test_image = - TEST_IMAGE.replace("ghcr.io", &format!("127.0.0.1:{}", registry.random_port)); + let test_image = TEST_IMAGE.replace("ghcr.io", ®istry.address); docker .create_image( Some(CreateImageOptions { @@ -1051,8 +1047,8 @@ mod test { .to_string(); let pushed_image_by_digest = format!( - "127.0.0.1:{}/zarf-dev/doom-game@{}", - registry.random_port, manifest_digest + "{}/zarf-dev/doom-game@{}", + registry.address, manifest_digest ); let verify_pull = docker @@ -1088,7 +1084,7 @@ mod test { let registry = TestRegistry::new(TEST_IMAGE).await; let client = reqwest::Client::new(); - let base_url = format!("http://127.0.0.1:{}", registry.random_port); + let base_url = format!("http://{}", registry.address); let chunk_size = 512 * 1024; // Create test data (1MB) From 9c07dacd51d0ceeb25083a2f93d91e83192e17db Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 2 Oct 2025 15:36:24 -0400 Subject: [PATCH 26/26] remove pid Signed-off-by: Austin Abro --- src/main.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 075bf33..27d8b8f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -397,14 +397,13 @@ async fn handle_put_manifest(name: String, reference: String, request: Request) } async fn handle_post_blob_upload(path: String) -> Response { - // Generate a simple unique ID for the upload session using timestamp and process id + // Generate a unique ID for the upload session using 
timestamp use std::time::{SystemTime, UNIX_EPOCH}; let timestamp = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_nanos(); - let pid = std::process::id(); - let upload_id = format!("{}-{}", timestamp, pid); + let upload_id = format!("{}", timestamp); let location = format!("/v2/{}/{}", path.trim_end_matches('/'), upload_id); Response::builder()