diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index a67788d6..592a3b3c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -65,6 +65,7 @@ edc-transfer-pull-http-receiver = { module = "org.eclipse.edc:transfer-pull-http edc-transfer-pull-http-dynamic-receiver = { module = "org.eclipse.edc:transfer-pull-http-dynamic-receiver", version.ref = "edc" } edc-util = { module = "org.eclipse.edc:util", version.ref = "edc" } edc-vault-azure = { module = "org.eclipse.edc.azure:vault-azure", version.ref = "edc" } +edc-vault-hashicorp = { module = "org.eclipse.edc:vault-hashicorp", version.ref = "edc" } edc-validator-data-address-http-data = { module = "org.eclipse.edc:validator-data-address-http-data", version.ref = "edc" } jakarta-rsApi = { module = "jakarta.ws.rs:jakarta.ws.rs-api", version.ref = "rsApi" } jakartaJson = { module = "org.glassfish:jakarta.json", version.ref = "jakarta-json" } @@ -82,6 +83,10 @@ testcontainers-junit-jupiter = { module = "org.testcontainers:junit-jupiter", ve kafka-clients = { module = "org.apache.kafka:kafka-clients", version.ref = "kafkaClients" } testcontainers-kafka = { module = "org.testcontainers:kafka", version.ref = "testcontainers" } testcontainers-junit = { module = "org.testcontainers:junit-jupiter", version.ref = "testcontainers" } +testcontainers-minio = { module = "org.testcontainers:minio", version.ref = "testcontainers" } +testcontainers-hashicorp-vault = { module = "org.testcontainers:vault", version.ref = "testcontainers" } +azure-storage-blob = { module = "com.azure:azure-storage-blob", version = "12.26.0" } +minio-io = { module = "io.minio:minio", version = "8.5.11" } [plugins] shadow = { id = "com.github.johnrengelman.shadow", version = "8.1.1" } diff --git a/system-tests/build.gradle.kts b/system-tests/build.gradle.kts index c9076e34..03370623 100644 --- a/system-tests/build.gradle.kts +++ b/system-tests/build.gradle.kts @@ -29,6 +29,10 @@ dependencies { 
testImplementation(libs.testcontainers.junit) testImplementation(libs.testcontainers.kafka) testImplementation(libs.kafka.clients) + testImplementation(libs.testcontainers.minio) + testImplementation(libs.testcontainers.hashicorp.vault) + testImplementation(libs.azure.storage.blob) + testImplementation(libs.minio.io) // runtimes testCompileOnly(project(":basic:basic-01-basic-connector")) @@ -47,6 +51,10 @@ dependencies { testCompileOnly(project(":policy:policy-01-policy-enforcement:policy-enforcement-provider")) testCompileOnly(project(":policy:policy-01-policy-enforcement:policy-enforcement-consumer")) testCompileOnly(project(":policy:policy-01-policy-enforcement:policy-functions")) + + testCompileOnly(project(":transfer:transfer-05-file-transfer-cloud:cloud-transfer-provider")) + testCompileOnly(project(":transfer:transfer-05-file-transfer-cloud:cloud-transfer-consumer")) + testCompileOnly(project(":transfer:transfer-05-file-transfer-cloud:transfer-file-cloud")) } tasks.compileJava { diff --git a/system-tests/src/test/java/org/eclipse/edc/samples/common/FileTransferCloudCommon.java b/system-tests/src/test/java/org/eclipse/edc/samples/common/FileTransferCloudCommon.java new file mode 100644 index 00000000..c924696c --- /dev/null +++ b/system-tests/src/test/java/org/eclipse/edc/samples/common/FileTransferCloudCommon.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2022 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Materna Information & Communications SE - initial test implementation for sample + * + */ + +package org.eclipse.edc.samples.common; + +import java.util.Objects; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static 
org.eclipse.edc.samples.common.FileTransferCommon.getFileContentFromRelativePath; +import static org.eclipse.edc.samples.util.TransferUtil.POLL_INTERVAL; +import static org.eclipse.edc.samples.util.TransferUtil.TIMEOUT; +import static org.eclipse.edc.samples.util.TransferUtil.get; +import static org.eclipse.edc.samples.util.TransferUtil.post; + +public class FileTransferCloudCommon { + + private static final String CONSUMER_MANAGEMENT_URL = "http://localhost:29193/management"; + private static final String V3_CATALOG_DATASET_REQUEST_PATH = "/v3/catalog/dataset/request"; + private static final String FETCH_DATASET_FROM_CATALOG_FILE_PATH = "transfer/transfer-05-file-transfer-cloud/resources/get-dataset.json"; + private static final String CATALOG_DATASET_ID = "\"odrl:hasPolicy\".'@id'"; + private static final String NEGOTIATE_CONTRACT_FILE_PATH = "transfer/transfer-05-file-transfer-cloud/resources/negotiate-contract.json"; + private static final String V3_CONTRACT_NEGOTIATIONS_PATH = "/v3/contractnegotiations/"; + private static final String CONTRACT_NEGOTIATION_ID = "@id"; + private static final String CONTRACT_AGREEMENT_ID = "contractAgreementId"; + private static final String CONTRACT_OFFER_ID_KEY = "{{contract-offer-id}}"; + + public static String fetchDatasetFromCatalog(String fetchDatasetFromCatalogFilePath) { + var catalogDatasetId = post( + CONSUMER_MANAGEMENT_URL + V3_CATALOG_DATASET_REQUEST_PATH, + getFileContentFromRelativePath(fetchDatasetFromCatalogFilePath), + CATALOG_DATASET_ID + ); + assertThat(catalogDatasetId).isNotEmpty(); + return catalogDatasetId; + } + + public static String negotiateContract(String negotiateContractFilePath, String catalogDatasetId) { + var requestBody = getFileContentFromRelativePath(negotiateContractFilePath) + .replace(CONTRACT_OFFER_ID_KEY, catalogDatasetId); + var contractNegotiationId = post( + CONSUMER_MANAGEMENT_URL + V3_CONTRACT_NEGOTIATIONS_PATH, + requestBody, + CONTRACT_NEGOTIATION_ID + ); + 
assertThat(contractNegotiationId).isNotEmpty(); + return contractNegotiationId; + } + + public static String getContractAgreementId(String contractNegotiationId) { + var url = CONSUMER_MANAGEMENT_URL + V3_CONTRACT_NEGOTIATIONS_PATH + contractNegotiationId; + return await() + .atMost(TIMEOUT) + .pollInterval(POLL_INTERVAL) + .until(() -> get(url, CONTRACT_AGREEMENT_ID), Objects::nonNull); + } + + public static String runNegotiation() { + var catalogDatasetId = fetchDatasetFromCatalog(FETCH_DATASET_FROM_CATALOG_FILE_PATH); + var contractNegotiationId = negotiateContract(NEGOTIATE_CONTRACT_FILE_PATH, catalogDatasetId); + return getContractAgreementId(contractNegotiationId); + } + +} diff --git a/system-tests/src/test/java/org/eclipse/edc/samples/transfer/Transfer05fileTransferCloudTest.java b/system-tests/src/test/java/org/eclipse/edc/samples/transfer/Transfer05fileTransferCloudTest.java new file mode 100644 index 00000000..404a8f92 --- /dev/null +++ b/system-tests/src/test/java/org/eclipse/edc/samples/transfer/Transfer05fileTransferCloudTest.java @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2022 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Materna Information & Communications SE - initial test implementation for sample + * + */ + +package org.eclipse.edc.samples.transfer; + +import com.azure.storage.blob.BlobContainerClientBuilder; +import com.azure.storage.common.StorageSharedKeyCredential; +import io.minio.ListObjectsArgs; +import io.minio.MakeBucketArgs; +import io.minio.MinioClient; +import org.eclipse.edc.connector.controlplane.transfer.spi.types.TransferProcessStates; +import org.eclipse.edc.junit.annotations.EndToEndTest; +import org.eclipse.edc.junit.extensions.EmbeddedRuntime; +import 
org.eclipse.edc.junit.extensions.RuntimeExtension; +import org.eclipse.edc.junit.extensions.RuntimePerClassExtension; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.MinIOContainer; +import org.testcontainers.containers.output.OutputFrame; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; +import org.testcontainers.utility.DockerImageName; +import org.testcontainers.vault.VaultContainer; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; +import static org.eclipse.edc.samples.common.FileTransferCloudCommon.runNegotiation; +import static org.eclipse.edc.samples.common.FileTransferCommon.getFileContentFromRelativePath; +import static org.eclipse.edc.samples.common.FileTransferCommon.getFileFromRelativePath; +import static org.eclipse.edc.samples.util.TransferUtil.checkTransferStatus; +import static org.eclipse.edc.samples.util.TransferUtil.startTransfer; + +@Testcontainers +@EndToEndTest +public class Transfer05fileTransferCloudTest { + + private static final String EDC_FS_CONFIG = "edc.fs.config"; + + private static final String CLOUD_CONSUMER_CONFIG_PROPERTIES_FILE_PATH = "transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties"; + private static final String START_TRANSFER_FILE_PATH = "transfer/transfer-05-file-transfer-cloud/resources/start-transfer.json"; + + private static final String PROVIDER = "provider"; + private static final String CONSUMER = "consumer"; + + private static final String PROVIDER_MODULE_PATH = ":transfer:transfer-05-file-transfer-cloud:cloud-transfer-provider"; + private static final String CONSUMER_MODULE_PATH = 
":transfer:transfer-05-file-transfer-cloud:cloud-transfer-consumer"; + + private static final String AZURITE_IMAGE_NAME = "mcr.microsoft.com/azure-storage/azurite:latest"; + private static final String AZURITE_ACCOUNT_NAME = "provider"; + private static final String AZURITE_ACCOUNT_KEY = "password"; + private static final String AZURITE_CONTAINER_NAME = "src-container"; + private static final int AZURITE_PORT = 10000; + + private static final String FILE_NAME = "test-document.txt"; + + private static final String MINIO_IMAGE_NAME = "minio/minio:latest"; + private static final String MINIO_ACCOUNT_NAME = "consumer"; + private static final String MINIO_ACCOUNT_KEY = "password"; + private static final String MINIO_BUCKET_NAME = "src-bucket"; + private static final int MINIO_PORT = 9000; + + private static final String VAULT_IMAGE_NAME = "hashicorp/vault:latest"; + private static final String VAULT_TOKEN = ""; + private static final int VAULT_PORT = 8200; + + + @AfterAll + static void tearDown() { + + if (vaultContainer != null) { + vaultContainer.stop(); + } + if (azuriteContainer != null) { + azuriteContainer.stop(); + } + if (minioContainer != null) { + minioContainer.stop(); + } + + } + + @Container + protected static VaultContainer vaultContainer = new VaultContainer<>(DockerImageName.parse(VAULT_IMAGE_NAME)) + .withExposedPorts(VAULT_PORT) + .withVaultToken(VAULT_TOKEN) + .withInitCommand( + "kv put secret/accessKeyId content=" + MINIO_ACCOUNT_NAME, + "kv put secret/secretAccessKey content=" + MINIO_ACCOUNT_KEY, + "kv put secret/provider-key content=" + AZURITE_ACCOUNT_KEY + ) + .withLogConsumer((OutputFrame outputFrame) -> System.out.print(outputFrame.getUtf8String())); + + @Container + protected static MinIOContainer minioContainer = new MinIOContainer(DockerImageName.parse(MINIO_IMAGE_NAME)) + .withEnv("MINIO_ROOT_USER", MINIO_ACCOUNT_NAME) + .withEnv("MINIO_ROOT_PASSWORD", MINIO_ACCOUNT_KEY) + .withExposedPorts(MINIO_PORT) + .withLogConsumer(frame -> 
System.out.print(frame.getUtf8String())); + + @Container + protected static GenericContainer azuriteContainer = new GenericContainer<>(DockerImageName.parse(AZURITE_IMAGE_NAME)) + .withExposedPorts(AZURITE_PORT) + .withEnv("AZURITE_ACCOUNTS", AZURITE_ACCOUNT_NAME + ":" + AZURITE_ACCOUNT_KEY) + .withLogConsumer(frame -> System.out.print(frame.getUtf8String())); + + @RegisterExtension + protected static RuntimeExtension consumer = new RuntimePerClassExtension(new EmbeddedRuntime( + CONSUMER, + Map.of( + EDC_FS_CONFIG, getFileFromRelativePath(CLOUD_CONSUMER_CONFIG_PROPERTIES_FILE_PATH).getAbsolutePath() + ), + CONSUMER_MODULE_PATH + )); + + @RegisterExtension + protected static RuntimeExtension provider = new RuntimePerClassExtension(new EmbeddedRuntime( + PROVIDER, + Map.ofEntries( + Map.entry("edc.participant.id", "provider"), + Map.entry("edc.dsp.callback.address", "http://localhost:19194/protocol"), + Map.entry("web.http.port", "19191"), + Map.entry("web.http.path", "/api"), + Map.entry("web.http.management.port", "19193"), + Map.entry("web.http.management.path", "/management"), + Map.entry("web.http.protocol.port", "19194"), + Map.entry("web.http.protocol.path", "/protocol"), + Map.entry("edc.api.auth.key", "password"), + Map.entry("edc.transfer.proxy.token.signer.privatekey.alias", "private-key"), + Map.entry("edc.transfer.proxy.token.verifier.publickey.alias", "public-key"), + Map.entry("web.http.public.port", "19291"), + Map.entry("web.http.public.path", "/public"), + Map.entry("web.http.control.port", "19192"), + Map.entry("web.http.control.path", "/control"), + Map.entry("edc.vault.hashicorp.url", "http://127.0.0.1:" + getVaultPort()), + Map.entry("edc.vault.hashicorp.token", ""), + Map.entry("edc.vault.hashicorp.api.secret.path", "/v1/secret"), + Map.entry("edc.vault.hashicorp.health.check.enabled", "false"), + Map.entry("edc.blobstore.endpoint.template", "http://127.0.0.1:" + getAzuritePort() + "/%s"), + Map.entry("edc.aws.access.key", "accessKeyId"), + 
Map.entry("edc.aws.secret.access.key", "secretAccessKey") + ), + PROVIDER_MODULE_PATH + )); + + @Test + void pushFile() throws Exception { + + var minioClient = + MinioClient.builder() + .endpoint("http://" + minioContainer.getHost() + ":" + minioContainer.getMappedPort(MINIO_PORT)) + .credentials(MINIO_ACCOUNT_NAME, MINIO_ACCOUNT_KEY) + .build(); + + minioClient.makeBucket(MakeBucketArgs.builder().bucket(MINIO_BUCKET_NAME).build()); + + var requestBody = getFileContentFromRelativePath(START_TRANSFER_FILE_PATH) + .replace("http://localhost:9000", "http://localhost:" + minioContainer.getMappedPort(9000).toString()); + + var contractAgreementId = runNegotiation(); + + var transferProcessId = startTransfer(requestBody, contractAgreementId); + + checkTransferStatus(transferProcessId, TransferProcessStates.COMPLETED); + + var objects = minioClient.listObjects( + ListObjectsArgs.builder().bucket(MINIO_BUCKET_NAME).build()); + + assertThat(objects) + .isNotEmpty().first() + .extracting(result -> { + try { + return result.get(); + } catch (Exception e) { + return fail(); + } + }) + .satisfies(item -> assertThat(item.objectName()).isEqualTo(FILE_NAME)); + } + + private static void configureAzurite() { + + var blobServiceUrl = String.format("http://%s:%d/%s", + azuriteContainer.getHost(), + azuriteContainer.getMappedPort(AZURITE_PORT), + AZURITE_ACCOUNT_NAME); + + var credential = new StorageSharedKeyCredential(AZURITE_ACCOUNT_NAME, AZURITE_ACCOUNT_KEY); + + var blobContainerClient = new BlobContainerClientBuilder() + .endpoint(blobServiceUrl) + .credential(credential) + .containerName(AZURITE_CONTAINER_NAME) + .buildClient(); + + blobContainerClient.create(); + + var blobClient = blobContainerClient.getBlobClient(FILE_NAME); + var blobContent = "Test"; + + blobClient.upload(new ByteArrayInputStream(blobContent.getBytes(StandardCharsets.UTF_8)), blobContent.length()); + + } + + private static int getAzuritePort() { + + if (!azuriteContainer.isRunning()) { + 
azuriteContainer.start(); + } + configureAzurite(); + + return azuriteContainer.getMappedPort(AZURITE_PORT); + } + + private static int getVaultPort() { + + if (!vaultContainer.isRunning()) { + vaultContainer.start(); + } + + return vaultContainer.getMappedPort(VAULT_PORT); + } + +} diff --git a/transfer/transfer-05-file-transfer-cloud/README.md b/transfer/transfer-05-file-transfer-cloud/README.md index 353d0f8e..bedf6865 100644 --- a/transfer/transfer-05-file-transfer-cloud/README.md +++ b/transfer/transfer-05-file-transfer-cloud/README.md @@ -1,192 +1,144 @@ # Improve the file transfer -So far we have performed a file transfer on a local machine using the Eclipse Dataspace Connector. While that is already -great progress, it probably won't be much use in a real-world production application. +So far, we have performed a file transfer on a local machine using the Eclipse Dataspace Connector. While that is already great progress, it probably won't be much use in a real-world production application. -This chapter improves on this by moving the file transfer "to the cloud". What we mean by that is that instead of -reading and writing the file from/to the disk, we will now: +This chapter improves on this by shifting the file transfer between cloud storage emulators. We will now: -- read the source from an Azure Storage, -- put the destination file into an AWS S3 Bucket. +- read the source from an Azurite instance, +- put the destination file into a MinIO instance. -## Setup local dev environment +## Prerequisites -Before we get into the nitty-gritty of cloud-based data transfers, we need to set up cloud resources. While we could do -that manually clicking through the Azure and AWS portals, there are simply more elegant solutions around. We use -Hashicorp Terraform for deployment and maintenance. +The following steps assume that you have Docker, Vault and the Azure CLI installed. 
If this is not the case, you can use the following links to access the installation instructions for all three. -> You will need an active Azure Subscription and an AWS Account with root-user/admin access! Both platforms offer free -> tiers, so no immediate cost incurs. +- Docker: https://docs.docker.com/engine/install/ +- Vault: https://developer.hashicorp.com/vault/docs/install +- Azure CLI: https://learn.microsoft.com/en-us/cli/azure/install-azure-cli -Also, you will need to be logged in to your Azure CLI as well as AWS CLI by entering the following commands in a shell: +## Start the docker-compose file ```bash -az login -aws configure +docker compose -f transfer/transfer-05-file-transfer-cloud/resources/docker-compose.yaml up -d ``` -The deployment scripts will provision all resources in Azure and AWS (that's why you need to be logged in to the CLIs) -and store all access credentials in an Azure Vault (learn more -[here](https://azure.microsoft.com/de-de/services/key-vault/#product-overview)). +Please check in the logs that minio, azurite and hashicorp-vault have started correctly. -## Deploy cloud resources +## Create bucket in minio -It's as simple as running the main terraform script: +Go to http://localhost:9001 and login with the credentials which you can find in the [docker-compose](resources/docker-compose.yaml) file (line 20-21), then go to 'Buckets' and create a bucket with the name "src-bucket". + +## Upload file to azurite +Let's create a container with the following commands: ```bash -cd transfer/transfer-05-file-transfer-cloud/terraform -terraform init --upgrade -terraform apply +conn_str="DefaultEndpointsProtocol=http;AccountName=provider;AccountKey=password;BlobEndpoint=http://127.0.0.1:10000/provider;" +az storage container create --name src-container --connection-string $conn_str ``` -it will prompt you to enter a unique name, which will serve as prefix for many resources both in Azure and in AWS. 
Then, -enter "yes" and let terraform works its magic. - -It shouldn't take more than a couple of minutes, and when it's done it will log the `client_id`, `tenant_id` -, `vault-name`, `storage-container-name` and `storage-account-name`. -> Take a note of these values! +If the container is created successfully, you will get this: +```json +{ + "created": true +} +``` -Download the certificate used to authenticate the generated service principal against Azure Active Directory: +Upload the file to the blob storage: ```bash -terraform output -raw certificate | base64 --decode > cert.pfx +az storage blob upload -f ./transfer/transfer-05-file-transfer-cloud/resources/test-document.txt --container-name src-container --name test-document.txt --connection-string $conn_str ``` -## Update connector config +You can run the following command to check if the file was added successfully -_Do the following for both the consumer's and the provider's `config.properties`!_ +```bash +az storage blob list --container-name src-container --connection-string "DefaultEndpointsProtocol=http;AccountName=provider;AccountKey=password;BlobEndpoint=http://127.0.0.1:10000/provider;" --query "[].{name:name}" --output table +``` -Let's modify the following config values to the connector configuration `config.properties` and insert the values that -terraform logged before: +You should see the test-document.txt file. -```properties -edc.vault.clientid= -edc.vault.tenantid= -edc.vault.certificate= -edc.vault.name= +```sh +Name +-------------------------- +test-document.txt ``` -## Update data seeder +## Configure the vault +We already started the vault at the beginning with docker compose. Now the following commands must be executed in a terminal window to add the necessary secrets. -Put the storage account name into the `DataAddress` builders within the `CloudTransferExtension` class. 
- -``` -DataAddress.Builder.newInstance() - .type("AzureStorage") - .property("account", "") - .property("container", "src-container") - .property("blobname", "test-document.txt") - .keyName("-key1") - .build(); +```bash +export VAULT_ADDR='http://0.0.0.0:8200' +vault kv put secret/accessKeyId content=consumer +vault kv put secret/secretAccessKey content=password +vault kv put secret/provider-key content=password ``` ## Bringing it all together ### 1. Boot connectors -While we have deployed several cloud resources in the previous chapter, the connectors themselves still run locally. -Thus, we can simply rebuild and run them: - ```bash ./gradlew clean build -java -Dedc.fs.config=transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties -jar transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/build/libs/consumer.jar -# in another terminal window: java -Dedc.fs.config=transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/config.properties -jar transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/build/libs/provider.jar +# in another terminal window: +java -Dedc.fs.config=transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties -jar transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/build/libs/consumer.jar ``` -### 2. Retrieve provider Contract Offers -To request data offers from the provider, run: +### 2. 
Retrieve provider Contract Offers ```bash -curl -X POST "http://localhost:9192/management/v3/catalog/request" \ ---header 'X-Api-Key: password' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "@context": { - "@vocab": "https://w3id.org/edc/v0.0.1/ns/" - }, - "counterPartyAddress": "http://localhost:8282/protocol", - "protocol": "dataspace-protocol-http" -}' +curl -X POST "http://localhost:29193/management/v3/catalog/request" \ + -H 'X-Api-Key: password' -H 'Content-Type: application/json' \ + -d @transfer/transfer-05-file-transfer-cloud/resources/fetch-catalog.json -s | jq ``` -#### 3. Negotiate Contract +Please replace the {{contract-offer-id}} placeholder in the [negotiate-contract.json](resources/negotiate-contract.json) file with the contract offer id you found in the catalog at the path dcat:dataset.odrl:hasPolicy.@id (the asset with "@id: 1"). -To negotiate a contract copy one of the contract offers into the statement below and execute it. At the time of writing -it is only possible to negotiate an _unchanged_ contract, so counter offers are not supported. +### 3. Negotiate Contract ```bash -curl --location --request POST 'http://localhost:9192/management/v3/contractnegotiations' \ ---header 'X-API-Key: password' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "connectorId": "provider", - "counterPartyAddress": "http://localhost:8282/protocol", - "protocol": "dataspace-protocol-http", - "policy": { } -}' +curl -d @transfer/transfer-05-file-transfer-cloud/resources/negotiate-contract.json \ + -H 'X-Api-Key: password' -X POST -H 'Content-Type: application/json' http://localhost:29193/management/v3/contractnegotiations \ + -s | jq ``` -The EDC will answer with the contract negotiation id. This id will be used in step 4. +We can now use the UUID to check the current status of the negotiation using an endpoint on the consumer side. -#### 4. 
Get Contract Agreement Id - -To get the contract agreement id insert the negotiation id into the following statement end execute it. +### 4. Get Contract Agreement Id ```bash -curl -X GET -H 'X-Api-Key: password' "http://localhost:9192/management/v3/contractnegotiations/{negotiationId}" +curl -X GET "http://localhost:29193/management/v3/contractnegotiations/{{contract-negotiation-id}}" \ + -H 'X-Api-Key: password' --header 'Content-Type: application/json' \ + -s | jq ``` -The EDC will return the current state of the contract negotiation. When the negotiation is completed successfully -(this may take a few seconds), the response will also contain an agreement id, that is required in the next step. - -#### 5. Transfer Data +Please replace the {{contract-agreement-id}} placeholder in the [start-transfer.json](resources/start-transfer.json) file with the contractAgreementId from the previous response. -To initiate the data transfer, execute the statement below. Please take care of setting the contract agreement id -obtained at previous step as well as a unique bucket name. +### 5. Transfer Data ```bash -curl --location --request POST 'http://localhost:9192/management/v3/transferprocesses' \ ---header 'X-API-Key: password' \ ---header 'Content-Type: application/json' \ ---data-raw ' -{ - "counterPartyAddress": "http://localhost:8282/protocol", - "protocol": "dataspace-protocol-http", - "connectorId": "consumer", - "assetId": "1", - "contractId": "", - "dataDestination": { - "type": "AmazonS3", - "region": "us-east-1", - "bucketName": "" - }, - "transferType": { - "contentType": "application/octet-stream", - "isFinite": true - } -}' +curl -X POST "http://localhost:29193/management/v3/transferprocesses" \ + -H 'X-Api-Key: password' -H "Content-Type: application/json" \ + -d @transfer/transfer-05-file-transfer-cloud/resources/start-transfer.json \ + -s | jq ``` -This command will return a transfer process id which will used to request the deprovisioning of the resources. 
+With the given UUID, we can check the transfer process. -#### 6. Deprovision resources - -Deprovisioning is not necessary per se, but it will do some cleanup, delete the temporary AWS role and the S3 bucket, so -it's generally advisable to do it. +### 6. Check Transfer Status ```bash -curl -X POST -H 'X-Api-Key: password' "http://localhost:9192/management/v3/transferprocesses/{transferProcessId}/deprovision" +curl -H 'X-Api-Key: password' http://localhost:29193/management/v3/transferprocesses/{{transfer-process-id}} -s | jq ``` -Finally, run terraform to clean-up the vault and other remaining stuffs: +## Stop docker container +Execute the following command in a terminal window to stop the docker container: ```bash -cd transfer/transfer-05-file-transfer-cloud/terraform -terraform destroy +docker compose -f transfer/transfer-05-file-transfer-cloud/resources/docker-compose.yaml down ``` + --- -[Previous Chapter](../transfer-04-open-telemetry/README.md) +[Previous Chapter](../transfer-04-event-consumer/README.md) \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/build.gradle.kts b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/build.gradle.kts index a2b98a41..fc088f38 100644 --- a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/build.gradle.kts +++ b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/build.gradle.kts @@ -21,33 +21,27 @@ plugins { } dependencies { + implementation(libs.edc.control.api.configuration) + implementation(libs.edc.control.plane.api.client) + implementation(libs.edc.control.plane.api) implementation(libs.edc.control.plane.core) - implementation(libs.edc.api.observability) + implementation(libs.edc.dsp) implementation(libs.edc.configuration.filesystem) - implementation(libs.edc.http) - implementation(libs.edc.provision.aws.s3) - implementation(libs.edc.iam.mock) - implementation(libs.edc.vault.azure) - - implementation(libs.edc.auth.tokenbased) 
implementation(libs.edc.management.api) - - implementation(libs.edc.dsp) - - implementation(libs.edc.data.plane.selector.core) - - implementation(libs.edc.control.plane.api.client) - implementation(libs.edc.control.plane.api) implementation(libs.edc.transfer.data.plane.signaling) implementation(libs.edc.transfer.pull.http.receiver) + implementation(libs.edc.validator.data.address.http.data) implementation(libs.edc.data.plane.selector.api) + implementation(libs.edc.data.plane.selector.core) + implementation(libs.edc.data.plane.self.registration) implementation(libs.edc.data.plane.control.api) implementation(libs.edc.data.plane.public.api) implementation(libs.edc.data.plane.core) implementation(libs.edc.data.plane.http) + } application { diff --git a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties index ae19e14c..2ec09f7f 100644 --- a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties +++ b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-consumer/config.properties @@ -1,24 +1,15 @@ -web.http.port=9191 +edc.participant.id=consumer +edc.dsp.callback.address=http://localhost:29194/protocol +web.http.port=29191 web.http.path=/api -web.http.management.port=9192 +web.http.management.port=29193 web.http.management.path=/management -web.http.protocol.port=9292 +web.http.protocol.port=29194 web.http.protocol.path=/protocol edc.api.auth.key=password -edc.vault.clientid= -edc.vault.tenantid= -edc.vault.certificate= -edc.vault.name= -edc.dsp.callback.address=http://localhost:9292/protocol - -#configuration from earlier examples -edc.participant.id=consumer -edc.receiver.http.endpoint=http://localhost:4000/receiver/urn:connector:provider/callback -edc.transfer.dataplane.token.signer.privatekey.alias=1 -edc.transfer.proxy.token.signer.privatekey.alias=1 +edc.transfer.proxy.token.signer.privatekey.alias=private-key 
edc.transfer.proxy.token.verifier.publickey.alias=public-key web.http.public.port=29291 web.http.public.path=/public web.http.control.port=29192 -web.http.control.path=/control -edc.dataplane.token.validation.endpoint=http://localhost:29192/control/token +web.http.control.path=/control \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/build.gradle.kts b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/build.gradle.kts index dc5b397a..525bfb64 100644 --- a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/build.gradle.kts +++ b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/build.gradle.kts @@ -21,33 +21,31 @@ plugins { } dependencies { - implementation(libs.edc.control.plane.core) - - implementation(libs.edc.api.observability) - - implementation(libs.edc.configuration.filesystem) - implementation(libs.edc.iam.mock) - implementation(libs.edc.vault.azure) - implementation(libs.edc.http) - - implementation(libs.edc.auth.tokenbased) - implementation(libs.edc.management.api) - - implementation(libs.edc.dsp) implementation(project(":transfer:transfer-05-file-transfer-cloud:transfer-file-cloud")) + implementation(libs.edc.control.api.configuration) implementation(libs.edc.control.plane.api.client) implementation(libs.edc.control.plane.api) + implementation(libs.edc.control.plane.core) + implementation(libs.edc.dsp) + implementation(libs.edc.configuration.filesystem) + implementation(libs.edc.iam.mock) + implementation(libs.edc.management.api) implementation(libs.edc.transfer.data.plane.signaling) implementation(libs.edc.transfer.pull.http.receiver) + implementation(libs.edc.validator.data.address.http.data) implementation(libs.edc.data.plane.selector.api) + implementation(libs.edc.data.plane.selector.core) + implementation(libs.edc.data.plane.self.registration) implementation(libs.edc.data.plane.control.api) implementation(libs.edc.data.plane.public.api) 
implementation(libs.edc.data.plane.core) implementation(libs.edc.data.plane.http) + + implementation(libs.edc.vault.hashicorp) } application { diff --git a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/config.properties b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/config.properties index a9f7dd05..60da868c 100644 --- a/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/config.properties +++ b/transfer/transfer-05-file-transfer-cloud/cloud-transfer-provider/config.properties @@ -1,24 +1,25 @@ -web.http.port=8181 +edc.participant.id=provider +edc.dsp.callback.address=http://localhost:19194/protocol +web.http.port=19191 web.http.path=/api -web.http.management.port=8182 +web.http.management.port=19193 web.http.management.path=/management -web.http.protocol.port=8282 +web.http.protocol.port=19194 web.http.protocol.path=/protocol edc.api.auth.key=password -edc.vault.clientid= -edc.vault.tenantid= -edc.vault.certificate= -edc.vault.name= -edc.dsp.callback.address=http://localhost:8282/protocol - -#configuration from earlier examples -edc.participant.id=provider -edc.receiver.http.endpoint=http://localhost:4000/receiver/urn:connector:provider/callback -edc.transfer.dataplane.token.signer.privatekey.alias=1 -edc.transfer.proxy.token.signer.privatekey.alias=1 +edc.transfer.proxy.token.signer.privatekey.alias=private-key edc.transfer.proxy.token.verifier.publickey.alias=public-key web.http.public.port=19291 web.http.public.path=/public web.http.control.port=19192 web.http.control.path=/control -edc.dataplane.token.validation.endpoint=http://localhost:19192/control/token + +edc.vault.hashicorp.url=http://127.0.0.1:8200 +edc.vault.hashicorp.token= +edc.vault.hashicorp.api.secret.path=/v1/secret +edc.vault.hashicorp.health.check.enabled=false + +edc.blobstore.endpoint.template=http://127.0.0.1:10000/%s + +edc.aws.access.key=accessKeyId +edc.aws.secret.access.key=secretAccessKey \ No newline at end of file diff --git 
a/transfer/transfer-05-file-transfer-cloud/resources/docker-compose.yaml b/transfer/transfer-05-file-transfer-cloud/resources/docker-compose.yaml new file mode 100644 index 00000000..f2c2e00d --- /dev/null +++ b/transfer/transfer-05-file-transfer-cloud/resources/docker-compose.yaml @@ -0,0 +1,31 @@ +services: + azurite: + container_name: azurite + image: mcr.microsoft.com/azure-storage/azurite + ports: + - "10000:10000" + - "10001:10001" + - "10002:10002" + environment: + AZURITE_ACCOUNTS: provider:password + + minio: + container_name: minio + image: quay.io/minio/minio + ports: + - "9000:9000" + - "9001:9001" + command: server /data --console-address ":9001" + environment: + MINIO_ROOT_USER: consumer + MINIO_ROOT_PASSWORD: password + + vault: + container_name: vault + image: hashicorp/vault + ports: + - "8200:8200" + cap_add: + - IPC_LOCK + environment: + VAULT_DEV_ROOT_TOKEN_ID: "" \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/resources/fetch-catalog.json b/transfer/transfer-05-file-transfer-cloud/resources/fetch-catalog.json new file mode 100644 index 00000000..b81e8ed5 --- /dev/null +++ b/transfer/transfer-05-file-transfer-cloud/resources/fetch-catalog.json @@ -0,0 +1,7 @@ +{ + "@context": { + "@vocab": "https://w3id.org/edc/v0.0.1/ns/" + }, + "counterPartyAddress": "http://localhost:19194/protocol", + "protocol": "dataspace-protocol-http" +} \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/resources/get-dataset.json b/transfer/transfer-05-file-transfer-cloud/resources/get-dataset.json new file mode 100644 index 00000000..b6674fe9 --- /dev/null +++ b/transfer/transfer-05-file-transfer-cloud/resources/get-dataset.json @@ -0,0 +1,7 @@ +{ + "@context": { "@vocab": "https://w3id.org/edc/v0.0.1/ns/"}, + "@type": "DatasetRequest", + "@id": "1", + "counterPartyAddress": "http://localhost:19194/protocol", + "protocol": "dataspace-protocol-http" +} \ No newline at end of file diff --git 
a/transfer/transfer-05-file-transfer-cloud/resources/negotiate-contract.json b/transfer/transfer-05-file-transfer-cloud/resources/negotiate-contract.json new file mode 100644 index 00000000..d04e405a --- /dev/null +++ b/transfer/transfer-05-file-transfer-cloud/resources/negotiate-contract.json @@ -0,0 +1,20 @@ +{ + "@context": { + "@vocab": "https://w3id.org/edc/v0.0.1/ns/" + }, + "@type": "ContractRequest", + "counterPartyAddress": "http://localhost:19194/protocol", + "protocol": "dataspace-protocol-http", + "policy": { + "@context": "http://www.w3.org/ns/odrl.jsonld", + "@id": "{{contract-offer-id}}", + "@type": "Offer", + "assigner": "provider", + "target": "1", + "odrl:permission": { + "odrl:action": { + "odrl:type": "USE" + } + } + } +} \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/resources/start-transfer.json b/transfer/transfer-05-file-transfer-cloud/resources/start-transfer.json new file mode 100644 index 00000000..3034f77e --- /dev/null +++ b/transfer/transfer-05-file-transfer-cloud/resources/start-transfer.json @@ -0,0 +1,19 @@ +{ + "@context": { + "@vocab": "https://w3id.org/edc/v0.0.1/ns/" + }, + "@type": "TransferRequestDto", + "connectorId": "provider", + "counterPartyAddress": "http://localhost:19194/protocol", + "contractId": "{{contract-agreement-id}}", + "assetId": "1", + "protocol": "dataspace-protocol-http", + "transferType": "AmazonS3-PUSH", + "dataDestination": { + "type": "AmazonS3", + "region": "eu-west-1", + "bucketName": "src-bucket", + "objectName": "test-document.txt", + "endpointOverride": "http://localhost:9000" + } +} \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/resources/test-document.txt b/transfer/transfer-05-file-transfer-cloud/resources/test-document.txt new file mode 100644 index 00000000..8318c86b --- /dev/null +++ b/transfer/transfer-05-file-transfer-cloud/resources/test-document.txt @@ -0,0 +1 @@ +Test \ No newline at end of file diff --git 
a/transfer/transfer-05-file-transfer-cloud/terraform/aws.tf b/transfer/transfer-05-file-transfer-cloud/terraform/aws.tf deleted file mode 100644 index 5a6794e1..00000000 --- a/transfer/transfer-05-file-transfer-cloud/terraform/aws.tf +++ /dev/null @@ -1,39 +0,0 @@ -provider "aws" { - # Configuration options - region = var.aws_region -} - - -resource "aws_iam_user" "user" { - name = "${var.environment}-aws-user" - path = "/" - force_destroy = true -} - -resource "aws_iam_access_key" "access_key" { - user = aws_iam_user.user.name - -} - -resource "aws_iam_user_policy_attachment" "s3fullaccess" { - user = aws_iam_user.user.name - policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" -} - -resource "aws_iam_user_policy_attachment" "iamfullaccess" { - user = aws_iam_user.user.name - policy_arn = "arn:aws:iam::aws:policy/IAMFullAccess" -} - - -resource "aws_s3_bucket" "src-bucket" { - bucket = "${var.environment}-src-bucket" -} - -output "new_user" { - sensitive = true - value = { - secret = aws_iam_access_key.access_key.secret - id = aws_iam_access_key.access_key.id - } -} diff --git a/transfer/transfer-05-file-transfer-cloud/terraform/main.tf b/transfer/transfer-05-file-transfer-cloud/terraform/main.tf deleted file mode 100644 index 78183b88..00000000 --- a/transfer/transfer-05-file-transfer-cloud/terraform/main.tf +++ /dev/null @@ -1,217 +0,0 @@ -# Configure the Azure provider -terraform { - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = ">= 2.88.1" - } - azuread = { - source = "hashicorp/azuread" - version = ">= 2.12.0" - } - aws = { - source = "hashicorp/aws" - version = ">= 3.45.0" - } - } -} - -provider "azurerm" { - features { - key_vault { - purge_soft_delete_on_destroy = true - recover_soft_deleted_key_vaults = true - } - } -} -provider "azuread" { - # Configuration options -} - -data "azurerm_client_config" "current" {} -data "azurerm_subscription" "primary" {} - - -resource "azurerm_resource_group" "core-resourcegroup" { - 
name = "${var.environment}-resources" - location = var.location - tags = { - "Environment" : "EDC" - } -} - -# App registration for the primary identity -resource "azuread_application" "demo-app-id" { - display_name = "PrimaryIdentity-${var.environment}" -} - -# Allow the app to authenticate with the generated principal -resource "azuread_application_certificate" "demo-main-identity-cert" { - type = "AsymmetricX509Cert" - application_object_id = azuread_application.demo-app-id.id - value = azurerm_key_vault_certificate.demo-main-identity-cert.certificate_data_base64 - end_date = azurerm_key_vault_certificate.demo-main-identity-cert.certificate_attribute[0].expires - start_date = azurerm_key_vault_certificate.demo-main-identity-cert.certificate_attribute[0].not_before -} - -# Generate a service principal -resource "azuread_service_principal" "main-app-sp" { - application_id = azuread_application.demo-app-id.application_id - app_role_assignment_required = false - tags = [ - "terraform"] -} - -# Create central Key Vault for storing generated identity information and credentials -resource "azurerm_key_vault" "main-vault" { - name = "${var.environment}-vault" - location = azurerm_resource_group.core-resourcegroup.location - resource_group_name = azurerm_resource_group.core-resourcegroup.name - enabled_for_disk_encryption = false - tenant_id = data.azurerm_client_config.current.tenant_id - soft_delete_retention_days = 7 - purge_protection_enabled = false - - sku_name = "standard" - enable_rbac_authorization = true - -} - -# Role assignment so that the primary identity may access the vault -resource "azurerm_role_assignment" "primary-id" { - scope = azurerm_key_vault.main-vault.id - role_definition_name = "Key Vault Secrets Officer" - principal_id = azuread_service_principal.main-app-sp.object_id -} - -#Role assignment so that the currently logged in user may access the vault, needed to add certificates -resource "azurerm_role_assignment" "current-user-certificates" { - 
scope = azurerm_key_vault.main-vault.id - role_definition_name = "Key Vault Certificates Officer" - principal_id = data.azurerm_client_config.current.object_id -} - -#Role assignment so that the currently logged in user may access the vault, needed to add secrets -resource "azurerm_role_assignment" "current-user-secrets" { - scope = azurerm_key_vault.main-vault.id - role_definition_name = "Key Vault Secrets Officer" - principal_id = data.azurerm_client_config.current.object_id -} - -# Generate a certificate to be used by the generated principal -resource "azurerm_key_vault_certificate" "demo-main-identity-cert" { - name = "demo-app-id-certificate" - key_vault_id = azurerm_key_vault.main-vault.id - - certificate_policy { - issuer_parameters { - name = "Self" - } - - key_properties { - exportable = true - key_size = 2048 - key_type = "RSA" - reuse_key = true - } - - lifetime_action { - action { - action_type = "AutoRenew" - } - - trigger { - days_before_expiry = 30 - } - } - - secret_properties { - content_type = "application/x-pkcs12" - } - - x509_certificate_properties { - # Server Authentication = 1.3.6.1.5.5.7.3.1 - # Client Authentication = 1.3.6.1.5.5.7.3.2 - extended_key_usage = ["1.3.6.1.5.5.7.3.1"] - - key_usage = [ - "cRLSign", - "dataEncipherment", - "digitalSignature", - "keyAgreement", - "keyCertSign", - "keyEncipherment", - ] - - subject = "CN=${azurerm_resource_group.core-resourcegroup.name}" - validity_in_months = 12 - } - } - depends_on = [ - azurerm_role_assignment.current-user-certificates - ] -} - -# Retrieve the Certificate from the Key Vault. -# Note that the data source is actually a Certificate in Key Vault, and not a Secret. -# However this actually works, and retrieves the Certificate base64 encoded. -# An advantage of this method is that the "Key Vault Secrets User" (read-only) -# role is then sufficient to export the certificate. -# This is documented at https://docs.microsoft.com/azure/key-vault/certificates/how-to-export-certificate. 
-data "azurerm_key_vault_secret" "certificate" { - name = azurerm_key_vault_certificate.demo-main-identity-cert.name - key_vault_id = azurerm_key_vault.main-vault.id -} - -#storage account -resource "azurerm_storage_account" "main-blobstore" { - name = "${replace(var.environment, "-", "")}storage" - resource_group_name = azurerm_resource_group.core-resourcegroup.name - location = azurerm_resource_group.core-resourcegroup.location - account_tier = "Standard" - account_replication_type = "GRS" - account_kind = "StorageV2" - //allows for blobs, queues, fileshares, etc. -} - -# storage container -resource "azurerm_storage_container" "main-blob-container" { - - name = "src-container" - storage_account_name = azurerm_storage_account.main-blobstore.name -} - -# put a file as blob to the storage container -resource "azurerm_storage_blob" "testfile" { - name = "test-document.txt" - storage_account_name = azurerm_storage_account.main-blobstore.name - storage_container_name = azurerm_storage_container.main-blob-container.name - type = "Block" - source = "test-document.txt" -} - -// primary key for the blob store -resource "azurerm_key_vault_secret" "blobstorekey" { - name = "${azurerm_storage_account.main-blobstore.name}-key1" - value = azurerm_storage_account.main-blobstore.primary_access_key - key_vault_id = azurerm_key_vault.main-vault.id - depends_on = [ - azurerm_role_assignment.current-user-secrets] -} - -// the AWS access credentials -resource "azurerm_key_vault_secret" "aws-keyid" { - name = "edc-aws-access-key" - value = aws_iam_access_key.access_key.id - key_vault_id = azurerm_key_vault.main-vault.id - depends_on = [ - azurerm_role_assignment.current-user-secrets] -} - -resource "azurerm_key_vault_secret" "aws-secret" { - name = "edc-aws-secret-access-key" - value = aws_iam_access_key.access_key.secret - key_vault_id = azurerm_key_vault.main-vault.id - depends_on = [ - azurerm_role_assignment.current-user-secrets] -} diff --git 
a/transfer/transfer-05-file-transfer-cloud/terraform/output.tf b/transfer/transfer-05-file-transfer-cloud/terraform/output.tf deleted file mode 100644 index 050920be..00000000 --- a/transfer/transfer-05-file-transfer-cloud/terraform/output.tf +++ /dev/null @@ -1,24 +0,0 @@ -output "client_id" { - value = azuread_application.demo-app-id.application_id -} - -output "tenant-id" { - value = data.azurerm_client_config.current.tenant_id -} - -output "certificate" { - value = data.azurerm_key_vault_secret.certificate.value - sensitive = true -} - -output "vault-name" { - value = azurerm_key_vault.main-vault.name -} - -output "storage-account-name" { - value = azurerm_storage_account.main-blobstore.name -} - -output "storage-container-name" { - value = azurerm_storage_container.main-blob-container.name -} diff --git a/transfer/transfer-05-file-transfer-cloud/terraform/test-document.txt b/transfer/transfer-05-file-transfer-cloud/terraform/test-document.txt deleted file mode 100644 index 84362ca0..00000000 --- a/transfer/transfer-05-file-transfer-cloud/terraform/test-document.txt +++ /dev/null @@ -1 +0,0 @@ -Test file \ No newline at end of file diff --git a/transfer/transfer-05-file-transfer-cloud/terraform/variables.tf b/transfer/transfer-05-file-transfer-cloud/terraform/variables.tf deleted file mode 100644 index 47ac178d..00000000 --- a/transfer/transfer-05-file-transfer-cloud/terraform/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "location" { - description = "geographic location of the Azure resources" - default = "westeurope" - type = string -} - -variable "aws_region" { - description = "geographic location of the AWS resources" - default = "us-east-1" - type = string -} - -variable "environment" { - description = "identifying string that is used in all azure resources" -} diff --git a/transfer/transfer-05-file-transfer-cloud/transfer-file-cloud/src/main/java/org/eclipse/edc/sample/extension/transfer/CloudTransferExtension.java 
b/transfer/transfer-05-file-transfer-cloud/transfer-file-cloud/src/main/java/org/eclipse/edc/sample/extension/transfer/CloudTransferExtension.java index 78d29cda..663baca5 100644 --- a/transfer/transfer-05-file-transfer-cloud/transfer-file-cloud/src/main/java/org/eclipse/edc/sample/extension/transfer/CloudTransferExtension.java +++ b/transfer/transfer-05-file-transfer-cloud/transfer-file-cloud/src/main/java/org/eclipse/edc/sample/extension/transfer/CloudTransferExtension.java @@ -55,20 +55,22 @@ public void initialize(ServiceExtensionContext context) { public void registerDataEntries() { var dataAddress = DataAddress.Builder.newInstance() .type("AzureStorage") - .property("account", "") + .property("@type", "DataAddress") + .property("account", "provider") .property("container", "src-container") - .property("blobname", "test-document.txt") - .keyName("-key1") + .property("blobName", "test-document.txt") + .keyName("provider-key") .build(); var asset = Asset.Builder.newInstance().id("1").dataAddress(dataAddress).build(); assetIndex.create(asset); var dataAddress2 = DataAddress.Builder.newInstance() .type("AzureStorage") - .property("account", "") + .property("@type", "DataAddress") + .property("account", "provider") .property("container", "src-container") - .property("blobname", "test-document.txt") - .keyName("-key1") + .property("blobName", "test-document.txt") + .keyName("provider-key") .build(); var asset2 = Asset.Builder.newInstance().id("2").dataAddress(dataAddress2).build(); assetIndex.create(asset2);